diff --git a/.azure-pipelines/cve_scan.yml b/.azure-pipelines/cve_scan.yml index 7c951df66f689..cbd024add1048 100644 --- a/.azure-pipelines/cve_scan.yml +++ b/.azure-pipelines/cve_scan.yml @@ -4,13 +4,14 @@ trigger: none pr: none -schedules: -- cron: "0 * * * *" - displayName: Hourly CVE scan - branches: - include: - - main - always: true +# This appears to be broken right now so disabling until it is fixed. +# schedules: +# - cron: "0 * * * *" +# displayName: Hourly CVE scan +# branches: +# include: +# - main +# always: true pool: vmImage: "ubuntu-18.04" diff --git a/.azure-pipelines/pipelines.yml b/.azure-pipelines/pipelines.yml index 527305ef41d83..6bc20230bafe7 100644 --- a/.azure-pipelines/pipelines.yml +++ b/.azure-pipelines/pipelines.yml @@ -244,6 +244,8 @@ stages: matrix: api: CI_TARGET: "bazel.api" + api_compat: + CI_TARGET: "bazel.api_compat" gcc: CI_TARGET: "bazel.gcc" clang_tidy: @@ -310,6 +312,13 @@ stages: itemPattern: "bazel.release/envoy_binary.tar.gz" downloadType: single targetPath: $(Build.StagingDirectory) + - task: DownloadBuildArtifacts@0 + inputs: + buildType: current + artifactName: "bazel.release" + itemPattern: "bazel.release/envoy-contrib_binary.tar.gz" + downloadType: single + targetPath: $(Build.StagingDirectory) - task: DownloadBuildArtifacts@0 inputs: buildType: current @@ -317,10 +326,19 @@ stages: itemPattern: "bazel.release.arm64/envoy_binary.tar.gz" downloadType: single targetPath: $(Build.StagingDirectory) + - task: DownloadBuildArtifacts@0 + inputs: + buildType: current + artifactName: "bazel.release.arm64" + itemPattern: "bazel.release.arm64/envoy-contrib_binary.tar.gz" + downloadType: single + targetPath: $(Build.StagingDirectory) - bash: | set -e mkdir -p linux/amd64 && tar zxf $(Build.StagingDirectory)/bazel.release/envoy_binary.tar.gz -C ./linux/amd64 + tar zxf $(Build.StagingDirectory)/bazel.release/envoy-contrib_binary.tar.gz -C ./linux/amd64 mkdir -p linux/arm64 && tar zxf 
$(Build.StagingDirectory)/bazel.release.arm64/envoy_binary.tar.gz -C ./linux/arm64 + tar zxf $(Build.StagingDirectory)/bazel.release.arm64/envoy-contrib_binary.tar.gz -C ./linux/arm64 ci/docker_ci.sh workingDirectory: $(Build.SourcesDirectory) env: diff --git a/.bazelrc b/.bazelrc index 2b2b0bad1f812..aa0bd78598253 100644 --- a/.bazelrc +++ b/.bazelrc @@ -76,6 +76,8 @@ build:asan --copt -fno-optimize-sibling-calls # Clang ASAN/UBSAN build:clang-asan --config=asan build:clang-asan --linkopt -fuse-ld=lld +build:clang-asan --linkopt --rtlib=compiler-rt +build:clang-asan --linkopt --unwindlib=libgcc # macOS ASAN/UBSAN build:macos --cxxopt=-std=c++17 @@ -220,7 +222,7 @@ build:remote --strategy=Javac=remote,sandboxed,local build:remote --strategy=Closure=remote,sandboxed,local build:remote --strategy=Genrule=remote,sandboxed,local build:remote --remote_timeout=7200 -build:remote --auth_enabled=true +build:remote --google_default_credentials=true build:remote --remote_download_toplevel # Windows bazel does not allow sandboxed as a spawn strategy @@ -229,7 +231,7 @@ build:remote-windows --strategy=Javac=remote,local build:remote-windows --strategy=Closure=remote,local build:remote-windows --strategy=Genrule=remote,local build:remote-windows --remote_timeout=7200 -build:remote-windows --auth_enabled=true +build:remote-windows --google_default_credentials=true build:remote-windows --remote_download_toplevel build:remote-clang --config=remote diff --git a/.dockerignore b/.dockerignore index ee59c60405f33..e0ef1fd120316 100644 --- a/.dockerignore +++ b/.dockerignore @@ -3,8 +3,8 @@ !/ci !/configs/google-vrp !/configs/*yaml -!/linux/amd64/build_release* -!/linux/arm64/build_release* +!/linux/amd64/build_*release* +!/linux/arm64/build_*release* !/local !/test/config/integration/certs !/windows diff --git a/.gitattributes b/.gitattributes index 895c1eeb76ad8..aefb99531aad2 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,5 +1,4 @@ /docs/root/version_history/current.rst 
merge=union -/api/envoy/**/v4alpha/* linguist-generated=true /generated_api_shadow/envoy/** linguist-generated=true /generated_api_shadow/bazel/** linguist-generated=true *.svg binary diff --git a/.github/actions/pr_notifier/pr_notifier.py b/.github/actions/pr_notifier/pr_notifier.py index 54bf3e9fc568e..9c6277ddac81d 100644 --- a/.github/actions/pr_notifier/pr_notifier.py +++ b/.github/actions/pr_notifier/pr_notifier.py @@ -29,6 +29,21 @@ 'yanavlasov': 'UJHLR5KFS', 'asraa': 'UKZKCFRTP', 'davinci26': 'U013608CUDV', + 'rojkov': 'UH5EXLYQK', +} + +# First pass reviewers who are not maintainers should get +# notifications but not result in a PR not getting assigned a +# maintainer owner. +FIRST_PASS = { + 'adisuissa': 'UT17EMMTP', + 'dmitri-d': 'UB1883Q5S', + 'tonya11en': 'U989BG2CW', + 'esmet': 'U01BCGBUUAE', + 'KBaichoo': 'U016ZPU8KBK', + 'wbpcode': 'U017KF5C0Q6', + 'mathetake': 'UG9TD2FSB', + 'RyanTheOptimist': 'U01SW3JC8GP', } # Only notify API reviewers who aren't maintainers. @@ -73,19 +88,21 @@ def pr_message(pr_age, pr_url, pr_title, delta_days, delta_hours): # Adds reminder lines to the appropriate assignee to review the assigned PRs -# Returns true if one of the assignees is in the known_assignee_map, false otherwise. -def add_reminders(assignees, assignees_and_prs, message, known_assignee_map): - has_known_assignee = False +# Returns true if one of the assignees is in the primary_assignee_map, false otherwise. 
+def add_reminders( + assignees, assignees_and_prs, message, primary_assignee_map, first_pass_assignee_map): + has_primary_assignee = False for assignee_info in assignees: assignee = assignee_info.login - if assignee not in known_assignee_map: + if assignee in primary_assignee_map: + has_primary_assignee = True + elif assignee not in first_pass_assignee_map: continue - has_known_assignee = True if assignee not in assignees_and_prs.keys(): assignees_and_prs[ assignee] = "Hello, %s, here are your PR reminders for the day \n" % assignee assignees_and_prs[assignee] = assignees_and_prs[assignee] + message - return has_known_assignee + return has_primary_assignee # Returns true if the PR needs an LGTM from an API shephard. @@ -146,7 +163,7 @@ def track_prs(): message = pr_message(delta, pr_info.html_url, pr_info.title, delta_days, delta_hours) if (needs_api_review(labels, repo, pr_info)): - add_reminders(pr_info.assignees, api_review_and_prs, message, API_REVIEWERS) + add_reminders(pr_info.assignees, api_review_and_prs, message, API_REVIEWERS, []) # If the PR has been out-SLO for over a day, inform on-call if delta > datetime.timedelta(hours=get_slo_hours() + 36): @@ -154,7 +171,7 @@ def track_prs(): # Add a reminder to each maintainer-assigner on the PR. has_maintainer_assignee = add_reminders( - pr_info.assignees, maintainers_and_prs, message, MAINTAINERS) + pr_info.assignees, maintainers_and_prs, message, MAINTAINERS, FIRST_PASS) # If there was no maintainer, track it as unassigned. 
if not has_maintainer_assignee: @@ -214,3 +231,4 @@ def post_to_oncall(client, unassigned_prs, out_slo_prs): post_to_oncall(client, maintainers_and_messages['unassigned'], stalled_prs) post_to_assignee(client, shephards_and_messages, API_REVIEWERS) post_to_assignee(client, maintainers_and_messages, MAINTAINERS) + post_to_assignee(client, maintainers_and_messages, FIRST_PASS) diff --git a/.github/actions/pr_notifier/requirements.txt b/.github/actions/pr_notifier/requirements.txt index 67f910aa56516..2fa1aad74b299 100644 --- a/.github/actions/pr_notifier/requirements.txt +++ b/.github/actions/pr_notifier/requirements.txt @@ -111,9 +111,9 @@ six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 # via pynacl -slack-sdk==3.7.0 \ - --hash=sha256:50b9fd6d8f83af7e8ad6d8e76882d04931842241f85ccfd30da09b4a7b9b1516 \ - --hash=sha256:f0bf3e38ac393eba7fe1a99191b0e72f710860c6d2edc1271606fcfc08bea2e1 +slack-sdk==3.10.1 \ + --hash=sha256:f17b71a578e94204d9033bffded634475f4ca0a6274c6c7a4fd8a9cb0ac7cd8b \ + --hash=sha256:2b4dde7728eb4ff5a581025d204578ccff25a5d8f0fe11ae175e3ce6e074434f # via -r .github/actions/pr_notifier/requirements.txt urllib3==1.26.6 \ --hash=sha256:39fb8672126159acb139a7718dd10806104dec1e2f0f6c88aab05d17df10c8d4 \ diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 85f49c528d5d5..36d284ee60378 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -12,7 +12,7 @@ updates: interval: "daily" - package-ecosystem: "pip" - directory: "/source/extensions/filters/network/kafka" + directory: "/contrib/kafka/filters/network/source" schedule: interval: "daily" @@ -36,21 +36,11 @@ updates: schedule: interval: "daily" -- package-ecosystem: "pip" - directory: "/tools/github" - schedule: - interval: "daily" - - package-ecosystem: "pip" directory: "/tools/config_validation" schedule: interval: "daily" -- package-ecosystem: "pip" - 
directory: "/tools/docker" - schedule: - interval: "daily" - - package-ecosystem: "pip" directory: "/tools/dependency" schedule: @@ -62,7 +52,7 @@ updates: interval: "daily" - package-ecosystem: "pip" - directory: "/tools/gpg" + directory: "/tools/distribution" schedule: interval: "daily" diff --git a/BUILD b/BUILD index 7ef982f5621bc..9e35562c085fb 100644 --- a/BUILD +++ b/BUILD @@ -33,3 +33,10 @@ package_group( "//test/extensions/...", ], ) + +package_group( + name = "contrib_library", + packages = [ + "//contrib/...", + ], +) diff --git a/CODEOWNERS b/CODEOWNERS index f84700c4bc384..1cb3aeacdede5 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -22,9 +22,7 @@ # original_src common extension extensions/filters/common/original_src @snowp @klarose # dubbo_proxy extension -/*/extensions/filters/network/dubbo_proxy @zyfjeff @lizan -# rocketmq_proxy extension -/*/extensions/filters/network/rocketmq_proxy @aaron-ai @lizhanhui @lizan +/*/extensions/filters/network/dubbo_proxy @zyfjeff @lizan @wbpcode # thrift_proxy extension /*/extensions/filters/network/thrift_proxy @zuercher @rgs1 # cdn_loop extension @@ -62,10 +60,6 @@ extensions/filters/common/original_src @snowp @klarose /*/extensions/tracers/xray @abaptiste @lavignes @mattklein123 # tracers.skywalking extension /*/extensions/tracers/skywalking @wbpcode @dio @lizan @Shikugawa -# mysql_proxy extension -/*/extensions/filters/network/mysql_proxy @rshriram @venilnoronha @mattklein123 -# postgres_proxy extension -/*/extensions/filters/network/postgres_proxy @fabriziomello @cpakulski @dio # quic extension /*/extensions/quic/ @alyssawilk @danzh2010 @mattklein123 @mpwarres @wu-bin @ggreenway # zookeeper_proxy extension @@ -110,7 +104,6 @@ extensions/filters/common/original_src @snowp @klarose # common crypto extension /*/extensions/common/crypto @lizan @bdecoste @asraa /*/extensions/common/proxy_protocol @alyssawilk @wez470 -/*/extensions/common/sqlutils @cpakulski @dio /*/extensions/filters/http/grpc_http1_bridge @snowp @jose 
/*/extensions/filters/http/gzip @gsagula @dio /*/extensions/filters/http/fault @rshriram @alyssawilk @@ -119,7 +112,6 @@ extensions/filters/common/original_src @snowp @klarose /*/extensions/filters/http/router @alyssawilk @mattklein123 @snowp /*/extensions/filters/http/grpc_web @fengli79 @lizan /*/extensions/filters/http/grpc_stats @kyessenov @lizan -/*/extensions/filters/http/squash @yuval-k @alyssawilk /*/extensions/filters/common/original_src @klarose @snowp /*/extensions/filters/listener/tls_inspector @ggreenway @asraa /*/extensions/grpc_credentials/example @wozz @htuch @@ -195,8 +187,19 @@ extensions/filters/http/oauth2 @rgs1 @derekargueta @snowp # set_metadata extension /*/extensions/filters/http/set_metadata @aguinet @snowp # Formatters +/*/extensions/formatter/metadata @cpakulski @lizan /*/extensions/formatter/req_without_query @dio @tsaarni # IP address input matcher /*/extensions/matching/input_matchers/ip @aguinet @snowp -# Kafka -/*/extensions/filters/network/kafka @mattklein123 @adamkotwasinski +# Key Value store +/*/extensions/key_value @alyssawilk @ryantheoptimist + +# Contrib +/contrib/exe/ @mattklein123 @lizan +/contrib/common/sqlutils/ @cpakulski @dio +/contrib/squash/ @yuval-k @alyssawilk +/contrib/kafka/ @mattklein123 @adamkotwasinski +/contrib/rocketmq_proxy/ @aaron-ai @lizhanhui @lizan +/contrib/mysql_proxy/ @rshriram @venilnoronha +/contrib/postgres_proxy/ @fabriziomello @cpakulski @dio +/contrib/sxg/ @cpapazian @rgs1 @alyssawilk diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 38cded6eaf8b5..c59d73c0e7222 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -262,7 +262,17 @@ Other changes will likely include * Editing [source/extensions/extensions_metadata.yaml](source/extensions/extensions_metadata.yaml) to include metadata for the new extensions * Editing [docs/root/api-v3/config/config.rst](docs/root/api-v3/config/config.rst) to add area/area * Adding `docs/root/api-v3/config/area/area.rst` to add a table of contents for the API docs 
- * Adding `source/extensions/area/well_known_names.h` for registered plugins + +# Adding contrib extensions + +See [EXTENSION_POLICY.md](EXTENSION_POLICY.md) for more information on contrib. Adding a contrib +extension mostly mirrors adding a normal extension above. Some differences are noted here: + +* API files should be added in `api/contrib/envoy/`, but the protos' namespaces should still be as + in normal extensions (which will make file movement easier later if the extension gets promoted + to core). +* Build config and metadata should be included in [contrib/contrib_build_config.bzl](contrib/contrib_build_config.bzl) + and [contrib/extensions_metadata.yaml](contrib/extensions_metadata.yaml). # DCO: Sign your work diff --git a/EXTENSION_POLICY.md b/EXTENSION_POLICY.md index 39d41f14f875f..7ef47bcd6cf13 100644 --- a/EXTENSION_POLICY.md +++ b/EXTENSION_POLICY.md @@ -137,3 +137,16 @@ case we need to install an extension point, which can be done as follows: 3. Update [extending envoy](docs/root/extending/extending.rst) to list the new extension point and add any documentation explaining the extension point. At the very least this should link to the corresponding proto. + +## Contrib extensions + +As described in [this document](https://docs.google.com/document/d/1yl7GOZK1TDm_7vxQvt8UQEAu07UQFru1uEKXM6ZZg_g/edit#), +Envoy allows an alternate path to adding extensions called `contrib/`. The barrier to entry for a +contrib extension is lower than a core extension, with the tradeoff that contrib extensions are not +included by default in the main image builds. Consumers need to pull directly from the contrib +images described in the installation guide. Please read the linked document in detail to determine +whether contrib extensions are the right choice for a newly proposed extension. + +**NOTE:** Contrib extensions are not eligible for Envoy security team coverage. 
+**NOTE:** As per the linked Google Doc, contrib extensions generally should use `v3alpha` to avoid +requiring API shepherd reviews. diff --git a/OWNERS.md b/OWNERS.md index 69a60b4a6f387..74100d89c40be 100644 --- a/OWNERS.md +++ b/OWNERS.md @@ -45,6 +45,8 @@ routing PRs, questions, etc. to the right place. * Event management, security, performance, data plane. * Sotiris Nanopoulos ([davinci26](https://github.com/davinci26)) (Sotiris.Nanopoulos@microsoft.com) * Windows, low level networking. +* Dmitry Rozhkov ([rojkov](https://github.com/rojkov)) (dmitry.rozhkov@intel.com) + * Scalability and performance. # Senior extension maintainers @@ -54,11 +56,15 @@ without further review. * Piotr Sikora ([PiotrSikora](https://github.com/PiotrSikora)) (piotrsikora@google.com) * Wasm +* Raúl Gutiérrez Segalés ([rgs1](https://github.com/rgs1)) (rgs@pinterest.com) + * Thrift # Envoy security team -* All maintainers -* Tony Allen ([tonya11en](https://github.com/tonya11en)) (tallen@lyft.com) +* All senior maintainers +* Tony Allen ([tonya11en](https://github.com/tonya11en)) (tony@allen.gg) +* Dmitri Dolguikh ([dmitri-d](https://github.com/dmitri-d)) (ddolguik@redhat.com) +* Yan Avlasov ([yanavlasov](https://github.com/yanavlasov)) (yavlasov@google.com) # Emeritus maintainers @@ -76,5 +82,3 @@ matter expert reviews. Feel free to loop them in as needed. * v2 xDS, listeners, filter chain discovery service. * Michael Payne ([moderation](https://github.com/moderation)) (m@m17e.org) * External dependencies, Envoy's supply chain and documentation. -* Dmitry Rozhkov ([rojkov](https://github.com/rojkov)) (dmitry.rozhkov@intel.com) - * Scalability and performance. diff --git a/README.md b/README.md index 8826efb9e4c3d..210048e28753d 100644 --- a/README.md +++ b/README.md @@ -74,7 +74,7 @@ The Envoy team meets twice per month on Tuesday at 9am PT. 
The public Google calendar is here: https://goo.gl/PkDijT * Meeting minutes are [here](https://goo.gl/5Cergb) -* Recorded videos are posted [here](https://www.youtube.com/channel/UCvqbFHwN-nwalWPjPUKpvTA/videos?view=0&sort=dd&shelf_id=1) +* Recorded videos are posted [here](https://www.youtube.com/channel/UC5z5mvPgqMs1xo5VuIWzYTA) ## Security diff --git a/REPO_LAYOUT.md b/REPO_LAYOUT.md index b8f509b2194c9..070c3d6683753 100644 --- a/REPO_LAYOUT.md +++ b/REPO_LAYOUT.md @@ -13,12 +13,13 @@ are: * [docs/](docs/): End user facing Envoy proxy and data plane API documentation as well as scripts for publishing final docs during releases. * [examples/](examples/): Larger Envoy examples using Docker and Docker Compose. -* [include/](include/): "Public" interface headers for "core" Envoy. In general, +* [envoy/](envoy/): "Public" interface headers for "core" Envoy. In general, these are almost entirely 100% abstract classes. There are a few cases of not-abstract classes in the "public" headers, typically for performance reasons. Note that "core" includes some "extensions" such as the HTTP connection manager filter and associated functionality which are so fundamental to Envoy that they will likely never be optional from a compilation perspective. * [restarter/](restarter/): Envoy's hot restart wrapper Python script. +* [security/](security/): Some templates for reporting security issues of Envoy. Historical security issues can also be found here. * [source/](source/): Source code for core Envoy as well as extensions. The layout of this directory is discussed in further detail below. * [support/](support/): Development support scripts (pre-commit Git hooks, etc.) @@ -65,7 +66,6 @@ Not every directory within test is described below, but a few highlights: We maintain a very specific code and namespace layout for extensions. This aids in discovering code/extensions, and allows us specify extension owners in [CODEOWNERS](CODEOWNERS). 
- * All extensions are either registered in [all_extensions.bzl](source/extensions/all_extensions.bzl) or [extensions_build_config.bzl](source/extensions/extensions_build_config.bzl). The former is for extensions that cannot be removed from the primary Envoy build. The latter is for extensions @@ -75,13 +75,13 @@ code/extensions, and allows us specify extension owners in [CODEOWNERS](CODEOWNE * These are the top level extension directories and associated namespaces: * [access_loggers/](/source/extensions/access_loggers): Access log implementations which use the `Envoy::Extensions::AccessLoggers` namespace. - * [bootstrap](/source/extensions/bootstrap): Bootstrap extensions which use + * [bootstrap/](/source/extensions/bootstrap): Bootstrap extensions which use the `Envoy::Extensions::Bootstrap` namespace. - * [clusters](/source/extensions/clusters): Cluster extensions which use the + * [clusters/](/source/extensions/clusters): Cluster extensions which use the `Envoy::Extensions::Clusters` namespace. - * [compression](/source/extensions/compression): Compression extensions + * [compression/](/source/extensions/compression): Compression extensions which use `Envoy::Extensions::Compression` namespace. - * [fatal_actions](/source/extensions/fatal_actions): Fatal Action extensions + * [fatal_actions/](/source/extensions/fatal_actions): Fatal Action extensions which use the `Envoy::Extensions::FatalActions` namespace. * [filters/http/](/source/extensions/filters/http): HTTP L7 filters which use the `Envoy::Extensions::HttpFilters` namespace. @@ -89,19 +89,19 @@ code/extensions, and allows us specify extension owners in [CODEOWNERS](CODEOWNE `Envoy::Extensions::ListenerFilters` namespace. * [filters/network/](/source/extensions/filters/network): L4 network filters which use the `Envoy::Extensions::NetworkFilters` namespace. 
- * [formatters](/source/extensions/formatters): Access log formatters which use the + * [formatters/](/source/extensions/formatters): Access log formatters which use the `Envoy::Extensions::Formatters` namespace. * [grpc_credentials/](/source/extensions/grpc_credentials): Custom gRPC credentials which use the `Envoy::Extensions::GrpcCredentials` namespace. * [health_checker/](/source/extensions/health_checker): Custom health checkers which use the `Envoy::Extensions::HealthCheckers` namespace. - * [internal_redirect](/source/extensions/internal_redirect): Internal Redirect + * [internal_redirect/](/source/extensions/internal_redirect): Internal Redirect extensions which use the `Envoy::Extensions::InternalRedirect` namespace. - * [quic_listeners](/source/extensions/quic_listeners): QUIC extensions which + * [quic_listeners/](/source/extensions/quic_listeners): QUIC extensions which use the `Envoy::Quic` namespace. - * [resource_monitors](/source/extensions/resource_monitors): Resource monitor + * [resource_monitors/](/source/extensions/resource_monitors): Resource monitor extensions which use the `Envoy::Extensions::ResourceMonitors` namespace. - * [retry](/source/extensions/retry): Retry extensions which use the + * [retry/](/source/extensions/retry): Retry extensions which use the `Envoy::Extensions::Retry` namespace. * [stat_sinks/](/source/extensions/stat_sinks): Stat sink implementations which use the `Envoy::Extensions::StatSinks` namespace. @@ -109,11 +109,11 @@ code/extensions, and allows us specify extension owners in [CODEOWNERS](CODEOWNE `Envoy::Extensions::Tracers` namespace. * [transport_sockets/](/source/extensions/transport_sockets): Transport socket implementations which use the `Envoy::Extensions::TransportSockets` namespace. - * [upstreams](/source/extensions/upstreams): Upstream extensions use the + * [upstreams/](/source/extensions/upstreams): Upstream extensions use the `Envoy::Extensions::Upstreams` namespace. 
- * [watchdog](/source/extensions/watchdog): Watchdog extensions use the + * [watchdog/](/source/extensions/watchdog): Watchdog extensions use the `Envoy::Extensions::Watchdog` namespace. - * [descriptors](/source/extensions/rate_limit_descriptors): Rate limit + * [rate_limit_descriptors/](/source/extensions/rate_limit_descriptors): Rate limit descriptor extensions use the `Envoy::Extensions::RateLimitDescriptors` namespace. * Each extension is contained wholly in its own namespace. E.g., @@ -123,3 +123,14 @@ code/extensions, and allows us specify extension owners in [CODEOWNERS](CODEOWNE code that is used by both HTTP and network filters. Common code used only by two HTTP filters would be found in `filters/http/common/`. Common code should be placed in a common namespace. E.g., `Envoy::Extensions::Filters::Common`. + +## [contrib](contrib/) layout + +This directory contains contrib extensions. See [EXTENSION_POLICY.md](EXTENSION_POLICY.md) for +more information. + +* [contrib/exe/](contrib/exe/): The default executable for contrib. This is similar to the + `envoy-static` target but also includes all contrib extensions, and is used to produce the + contrib image targets. +* [contrib/...](contrib/): The rest of this directory mirrors the [source/extensions](source/extensions/) + layout. Contrib extensions are placed here. diff --git a/SECURITY.md b/SECURITY.md index 62a1424a1a983..54fe4981bf431 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -93,6 +93,8 @@ detect issues during their execution on ClusterFuzz. A soak period of 7 days pro guarantee, since we will invoke the security release process for medium or higher severity issues for these older bugs. +**NOTE:** Contrib extensions are not eligible for Envoy security team coverage. + ### Threat model See https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/security/threat_model. 
diff --git a/STYLE.md b/STYLE.md index 624023602fb8e..bc45c15a9d921 100644 --- a/STYLE.md +++ b/STYLE.md @@ -24,7 +24,7 @@ # Deviations from Google C++ style guidelines * Exceptions are allowed and encouraged where appropriate. When using exceptions, do not add - additional error handing that cannot possibly happen in the case an exception is thrown. + additional error handling that cannot possibly happen in the case an exception is thrown. * Do use exceptions for: - Configuration ingestion error handling. Invalid configurations (dynamic and static) should throw meaningful `EnvoyException`s, the configuration diff --git a/api/API_VERSIONING.md b/api/API_VERSIONING.md index 3f5d41e710a9c..f864619602a1d 100644 --- a/api/API_VERSIONING.md +++ b/api/API_VERSIONING.md @@ -68,10 +68,12 @@ experience a backward compatible break on a change. Specifically: structurally or by documentation. An exception to the above policy exists for: -* Changes made within 14 days of the introduction of a new API field or message. +* Changes made within 14 days of the introduction of a new API field or message, provided the new field +or message has not been included in an Envoy release. * API versions tagged `vNalpha`. Within an alpha major version, arbitrary breaking changes are allowed. * Any field, message or enum with a `[#not-implemented-hide:..` comment. * Any proto with a `(udpa.annotations.file_status).work_in_progress` option annotation. +* Any proto marked as [#alpha:]. Note that changes to default values for wrapped types, e.g. `google.protobuf.UInt32Value` are not governed by the above policy. 
Any management server requiring stability across Envoy API or diff --git a/api/BUILD b/api/BUILD index a70eae799d797..5bbde32946b63 100644 --- a/api/BUILD +++ b/api/BUILD @@ -18,8 +18,6 @@ proto_library( "//envoy/api/v2/ratelimit:pkg", "//envoy/api/v2/route:pkg", "//envoy/config/bootstrap/v2:pkg", - "//envoy/config/cluster/dynamic_forward_proxy/v2alpha:pkg", - "//envoy/config/common/dynamic_forward_proxy/v2alpha:pkg", "//envoy/config/filter/accesslog/v2:pkg", "//envoy/config/filter/fault/v2:pkg", "//envoy/config/filter/network/http_connection_manager/v2:pkg", @@ -59,6 +57,12 @@ proto_library( name = "v3_protos", visibility = ["//visibility:public"], deps = [ + "//contrib/envoy/extensions/filters/http/squash/v3:pkg", + "//contrib/envoy/extensions/filters/http/sxg/v3alpha:pkg", + "//contrib/envoy/extensions/filters/network/kafka_broker/v3:pkg", + "//contrib/envoy/extensions/filters/network/mysql_proxy/v3:pkg", + "//contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha:pkg", + "//contrib/envoy/extensions/filters/network/rocketmq_proxy/v3:pkg", "//envoy/admin/v3:pkg", "//envoy/config/accesslog/v3:pkg", "//envoy/config/bootstrap/v3:pkg", @@ -96,6 +100,7 @@ proto_library( "//envoy/extensions/clusters/dynamic_forward_proxy/v3:pkg", "//envoy/extensions/clusters/redis/v3:pkg", "//envoy/extensions/common/dynamic_forward_proxy/v3:pkg", + "//envoy/extensions/common/key_value/v3:pkg", "//envoy/extensions/common/matching/v3:pkg", "//envoy/extensions/common/ratelimit/v3:pkg", "//envoy/extensions/common/tap/v3:pkg", @@ -145,7 +150,6 @@ proto_library( "//envoy/extensions/filters/http/rbac/v3:pkg", "//envoy/extensions/filters/http/router/v3:pkg", "//envoy/extensions/filters/http/set_metadata/v3:pkg", - "//envoy/extensions/filters/http/squash/v3:pkg", "//envoy/extensions/filters/http/tap/v3:pkg", "//envoy/extensions/filters/http/wasm/v3:pkg", "//envoy/extensions/filters/listener/http_inspector/v3:pkg", @@ -161,15 +165,11 @@ proto_library( 
"//envoy/extensions/filters/network/echo/v3:pkg", "//envoy/extensions/filters/network/ext_authz/v3:pkg", "//envoy/extensions/filters/network/http_connection_manager/v3:pkg", - "//envoy/extensions/filters/network/kafka_broker/v3:pkg", "//envoy/extensions/filters/network/local_ratelimit/v3:pkg", "//envoy/extensions/filters/network/mongo_proxy/v3:pkg", - "//envoy/extensions/filters/network/mysql_proxy/v3:pkg", - "//envoy/extensions/filters/network/postgres_proxy/v3alpha:pkg", "//envoy/extensions/filters/network/ratelimit/v3:pkg", "//envoy/extensions/filters/network/rbac/v3:pkg", "//envoy/extensions/filters/network/redis_proxy/v3:pkg", - "//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg", "//envoy/extensions/filters/network/sni_cluster/v3:pkg", "//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha:pkg", "//envoy/extensions/filters/network/tcp_proxy/v3:pkg", @@ -180,6 +180,7 @@ proto_library( "//envoy/extensions/filters/network/zookeeper_proxy/v3:pkg", "//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg", "//envoy/extensions/filters/udp/udp_proxy/v3:pkg", + "//envoy/extensions/formatter/metadata/v3:pkg", "//envoy/extensions/formatter/req_without_query/v3:pkg", "//envoy/extensions/health_checkers/redis/v3:pkg", "//envoy/extensions/http/header_formatters/preserve_case/v3:pkg", @@ -188,6 +189,7 @@ proto_library( "//envoy/extensions/internal_redirect/allow_listed_routes/v3:pkg", "//envoy/extensions/internal_redirect/previous_routes/v3:pkg", "//envoy/extensions/internal_redirect/safe_cross_scheme/v3:pkg", + "//envoy/extensions/key_value/file_based/v3:pkg", "//envoy/extensions/matching/common_inputs/environment_variable/v3:pkg", "//envoy/extensions/matching/input_matchers/consistent_hashing/v3:pkg", "//envoy/extensions/matching/input_matchers/ip/v3:pkg", @@ -255,3 +257,11 @@ proto_library( ":v3_protos", ], ) + +filegroup( + name = "proto_breaking_change_detector_buf_config", + srcs = [ + "buf.yaml", + ], + visibility = ["//visibility:public"], +) 
diff --git a/api/bazel/repositories.bzl b/api/bazel/repositories.bzl index 74e19f831179f..ef92aa45f0064 100644 --- a/api/bazel/repositories.bzl +++ b/api/bazel/repositories.bzl @@ -47,6 +47,11 @@ def api_dependencies(): name = "opentelemetry_proto", build_file_content = OPENTELEMETRY_LOGS_BUILD_CONTENT, ) + external_http_archive( + name = "com_github_bufbuild_buf", + build_file_content = BUF_BUILD_CONTENT, + tags = ["manual"], + ) PROMETHEUSMETRICS_BUILD_CONTENT = """ load("@envoy_api//bazel:api_build_system.bzl", "api_cc_py_proto_library") @@ -150,3 +155,17 @@ go_proto_library( visibility = ["//visibility:public"], ) """ + +BUF_BUILD_CONTENT = """ +package( + default_visibility = ["//visibility:public"], +) + +filegroup( + name = "buf", + srcs = [ + "@com_github_bufbuild_buf//:bin/buf", + ], + tags = ["manual"], # buf is downloaded as a linux binary; tagged manual to prevent build for non-linux users +) +""" diff --git a/api/bazel/repository_locations.bzl b/api/bazel/repository_locations.bzl index 968c6a9ffa286..be1e9c9789e4b 100644 --- a/api/bazel/repository_locations.bzl +++ b/api/bazel/repository_locations.bzl @@ -44,9 +44,9 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_desc = "xDS API Working Group (xDS-WG)", project_url = "https://github.com/cncf/xds", # During the UDPA -> xDS migration, we aren't working with releases. 
- version = "b88cc788a63e5b38ee334a2e702c67901355ae2c", - sha256 = "3220df8564f217665b6e17776569c5f748178c2b9cbf83bb55a13ddc0a3738f0", - release_date = "2021-03-23", + version = "dd25fe81a44506ab21ea666fb70b3b1c4bb183ee", + sha256 = "9184235cd31272679e4c7f9232c341d4ea75351ded74d3fbba28b05c290bfa71", + release_date = "2021-07-22", strip_prefix = "xds-{version}", urls = ["https://github.com/cncf/xds/archive/{version}.tar.gz"], use_category = ["api"], @@ -118,4 +118,16 @@ REPOSITORY_LOCATIONS_SPEC = dict( urls = ["https://github.com/open-telemetry/opentelemetry-proto/archive/v{version}.tar.gz"], use_category = ["api"], ), + com_github_bufbuild_buf = dict( + project_name = "buf", + project_desc = "A new way of working with Protocol Buffers.", # Used for breaking change detection in API protobufs + project_url = "https://buf.build", + version = "0.53.0", + sha256 = "888bb52d358e34a8d6a57ecff426bed896bdf478ad13c78a70a9e1a9a2d75715", + strip_prefix = "buf", + urls = ["https://github.com/bufbuild/buf/releases/download/v{version}/buf-Linux-x86_64.tar.gz"], + release_date = "2021-08-25", + use_category = ["api"], + tags = ["manual"], + ), ) diff --git a/api/buf.lock b/api/buf.lock new file mode 100644 index 0000000000000..046331375ce01 --- /dev/null +++ b/api/buf.lock @@ -0,0 +1,52 @@ +# Generated by buf. DO NOT EDIT. 
+version: v1 +deps: + - remote: buf.build + owner: beta + repository: opencensus + branch: main + commit: 5f5f8259293649d68707d2e5b6285748 + digest: b1-myYwcdM0Xu05qIwhiy4eWEcARYUuZZ1vTbYvrrHu1mU= + create_time: 2021-03-03T20:50:42.079743Z + - remote: buf.build + owner: beta + repository: opentelemetry + branch: main + commit: 549da630ffe24b53be3983fcee3bb346 + digest: b1-HVAvWKH61BF6TdZSbHRhrD2SUuC0V7uAlZgCRimGPLI= + create_time: 2021-08-09T14:24:57.923964Z + - remote: buf.build + owner: beta + repository: prometheus + branch: main + commit: a91b42d18a994cd4b07b37f365f87cf9 + digest: b1-uKmv58fyoNwJI855qg7UEagfdyUl6XNPsFAdDoi57f4= + create_time: 2021-06-23T20:16:58.410272Z + - remote: buf.build + owner: beta + repository: protoc-gen-validate + branch: main + commit: 82388a0a0cb04e98a203f95dfed5e84b + digest: b1-lYgUMN58PxyCwvfQoopp40AJ-oHHjWXAzksF7v9U-U4= + create_time: 2021-06-21T22:00:30.152545Z + - remote: buf.build + owner: beta + repository: xds + branch: main + commit: 45f850b92541434cbde4aece01bc7d53 + digest: b1-QZUL5DC6-nVgMMlajH_hlImwghg5HjRsqlEAOl0dZgI= + create_time: 2021-08-09T14:37:06.872899Z + - remote: buf.build + owner: gogo + repository: protobuf + branch: main + commit: 4df00b267f944190a229ce3695781e99 + digest: b1-sjLgsg7CzrkOrIjBDh3s-l0aMjE6oqTj85-OsoopKAw= + create_time: 2021-08-10T00:14:28.345069Z + - remote: buf.build + owner: googleapis + repository: googleapis + branch: main + commit: d1a849b8f8304950832335723096e954 + digest: b1-zJkwX0YeOp1Wa0Jaj_RqMLa2-oEzePH6PJEK8aaMeI4= + create_time: 2021-08-26T15:07:19.652533Z diff --git a/api/buf.yaml b/api/buf.yaml new file mode 100644 index 0000000000000..781f01f972ced --- /dev/null +++ b/api/buf.yaml @@ -0,0 +1,19 @@ +version: v1beta1 +deps: + - buf.build/googleapis/googleapis + - buf.build/beta/opencensus + - buf.build/beta/prometheus + - buf.build/beta/opentelemetry + - buf.build/gogo/protobuf + - buf.build/beta/xds +breaking: + ignore_unstable_packages: true + use: + - FIELD_SAME_ONEOF + - 
FIELD_SAME_JSON_NAME + - FIELD_SAME_NAME + - FIELD_SAME_TYPE + - FIELD_SAME_LABEL + - FILE_SAME_PACKAGE + - FIELD_NO_DELETE_UNLESS_NUMBER_RESERVED + - FIELD_NO_DELETE_UNLESS_NAME_RESERVED diff --git a/api/envoy/extensions/filters/http/squash/v3/BUILD b/api/contrib/envoy/extensions/filters/http/squash/v3/BUILD similarity index 100% rename from api/envoy/extensions/filters/http/squash/v3/BUILD rename to api/contrib/envoy/extensions/filters/http/squash/v3/BUILD diff --git a/api/envoy/extensions/filters/http/squash/v3/squash.proto b/api/contrib/envoy/extensions/filters/http/squash/v3/squash.proto similarity index 100% rename from api/envoy/extensions/filters/http/squash/v3/squash.proto rename to api/contrib/envoy/extensions/filters/http/squash/v3/squash.proto diff --git a/generated_api_shadow/envoy/extensions/tracers/datadog/v4alpha/BUILD b/api/contrib/envoy/extensions/filters/http/sxg/v3alpha/BUILD similarity index 82% rename from generated_api_shadow/envoy/extensions/tracers/datadog/v4alpha/BUILD rename to api/contrib/envoy/extensions/filters/http/sxg/v3alpha/BUILD index d500cc41da1fe..3ca8242f77801 100644 --- a/generated_api_shadow/envoy/extensions/tracers/datadog/v4alpha/BUILD +++ b/api/contrib/envoy/extensions/filters/http/sxg/v3alpha/BUILD @@ -6,7 +6,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ - "//envoy/config/trace/v3:pkg", + "//envoy/extensions/transport_sockets/tls/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/contrib/envoy/extensions/filters/http/sxg/v3alpha/sxg.proto b/api/contrib/envoy/extensions/filters/http/sxg/v3alpha/sxg.proto new file mode 100644 index 0000000000000..b9efc278e6de8 --- /dev/null +++ b/api/contrib/envoy/extensions/filters/http/sxg/v3alpha/sxg.proto @@ -0,0 +1,67 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.sxg.v3alpha; + +import "envoy/extensions/transport_sockets/tls/v3/secret.proto"; + +import "google/protobuf/duration.proto"; + +import 
"udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.sxg.v3alpha"; +option java_outer_classname = "SxgProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Signed HTTP Exchange Filter] +// SXG :ref:`configuration overview `. +// [#extension: envoy.filters.http.sxg] + +// [#next-free-field: 10] +message SXG { + // The SDS configuration for the public key data for the SSL certificate that will be used to sign the + // SXG response. + transport_sockets.tls.v3.SdsSecretConfig certificate = 1; + + // The SDS configuration for the private key data for the SSL certificate that will be used to sign the + // SXG response. + transport_sockets.tls.v3.SdsSecretConfig private_key = 2; + + // The duration for which the generated SXG package will be valid. Default is 604800s (7 days in seconds). + // Note that in order to account for clock skew, the timestamp will be backdated by a day. So, if duration + // is set to 7 days, that will be 7 days from 24 hours ago (6 days from now). Also note that while 6/7 days + // is appropriate for most content, if the downstream service is serving Javascript, or HTML with inline + // Javascript, 1 day (so, with backdated expiry, 2 days, or 172800 seconds) is more appropriate. + google.protobuf.Duration duration = 3; + + // The SXG response payload is Merkle Integrity Content Encoding (MICE) encoded (specification is [here](https://datatracker.ietf.org/doc/html/draft-thomson-http-mice-03)) + // This value indicates the record size in the encoded payload. The default value is 4096. + uint64 mi_record_size = 4; + + // The URI of certificate CBOR file published. Since it is required that the certificate CBOR file + // be served from the same domain as the SXG document, this should be a relative URI. 
+ string cbor_url = 5 [(validate.rules).string = {min_len: 1 prefix: "/"}]; + + // URL to retrieve validity data for signature, a CBOR map. See specification [here](https://tools.ietf.org/html/draft-yasskin-httpbis-origin-signed-exchanges-impl-00#section-3.6) + string validity_url = 6 [(validate.rules).string = {min_len: 1 prefix: "/"}]; + + // Header that will be set if it is determined that the client can accept SXG (typically `accept: application/signed-exchange;v=b3) + // If not set, filter will default to: `x-client-can-accept-sxg` + string client_can_accept_sxg_header = 7 [ + (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false ignore_empty: true} + ]; + + // Header set by downstream service to signal that the response should be transformed to SXG If not set, + // filter will default to: `x-should-encode-sxg` + string should_encode_sxg_header = 8 [ + (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false ignore_empty: true} + ]; + + // Headers that will be stripped from the SXG document, by listing a prefix (i.e. 
`x-custom-` will cause + // all headers prefixed by `x-custom-` to be omitted from the SXG document) + repeated string header_prefix_filters = 9 [ + (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} + ]; +} diff --git a/api/envoy/extensions/filters/network/kafka_broker/v3/BUILD b/api/contrib/envoy/extensions/filters/network/kafka_broker/v3/BUILD similarity index 100% rename from api/envoy/extensions/filters/network/kafka_broker/v3/BUILD rename to api/contrib/envoy/extensions/filters/network/kafka_broker/v3/BUILD diff --git a/api/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto b/api/contrib/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto similarity index 100% rename from api/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto rename to api/contrib/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto diff --git a/api/envoy/extensions/filters/network/mysql_proxy/v3/BUILD b/api/contrib/envoy/extensions/filters/network/mysql_proxy/v3/BUILD similarity index 100% rename from api/envoy/extensions/filters/network/mysql_proxy/v3/BUILD rename to api/contrib/envoy/extensions/filters/network/mysql_proxy/v3/BUILD diff --git a/api/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto b/api/contrib/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto similarity index 100% rename from api/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto rename to api/contrib/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto diff --git a/api/envoy/extensions/filters/network/postgres_proxy/v3alpha/BUILD b/api/contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha/BUILD similarity index 100% rename from api/envoy/extensions/filters/network/postgres_proxy/v3alpha/BUILD rename to api/contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha/BUILD diff --git 
a/api/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.proto b/api/contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.proto similarity index 100% rename from api/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.proto rename to api/contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.proto diff --git a/api/envoy/extensions/filters/network/rocketmq_proxy/v3/BUILD b/api/contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/BUILD similarity index 100% rename from api/envoy/extensions/filters/network/rocketmq_proxy/v3/BUILD rename to api/contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/BUILD diff --git a/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto b/api/contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto similarity index 94% rename from generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto rename to api/contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto index c354b629bea28..12438751fada6 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto +++ b/api/contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package envoy.extensions.filters.network.rocketmq_proxy.v3; -import "envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto"; +import "contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto"; import "google/protobuf/duration.proto"; diff --git a/api/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto b/api/contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto similarity index 100% rename from api/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto rename to api/contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto diff --git 
a/api/envoy/admin/v3/server_info.proto b/api/envoy/admin/v3/server_info.proto index 108cb8a115487..15b2c74e88310 100644 --- a/api/envoy/admin/v3/server_info.proto +++ b/api/envoy/admin/v3/server_info.proto @@ -87,9 +87,9 @@ message CommandLineOptions { Immediate = 1; } - reserved 12, 20, 21; + reserved 12, 20, 21, 29; - reserved "max_stats", "max_obj_name_len"; + reserved "max_stats", "max_obj_name_len", "bootstrap_version"; // See :option:`--base-id` for details. uint64 base_id = 1; @@ -178,9 +178,6 @@ message CommandLineOptions { // See :option:`--disable-extensions` for details. repeated string disabled_extensions = 28; - // See :option:`--bootstrap-version` for details. - uint32 bootstrap_version = 29; - // See :option:`--enable-fine-grain-logging` for details. bool enable_fine_grain_logging = 34; diff --git a/api/envoy/admin/v4alpha/BUILD b/api/envoy/admin/v4alpha/BUILD deleted file mode 100644 index 74de2ca2a3d53..0000000000000 --- a/api/envoy/admin/v4alpha/BUILD +++ /dev/null @@ -1,17 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/admin/v3:pkg", - "//envoy/config/bootstrap/v4alpha:pkg", - "//envoy/config/cluster/v4alpha:pkg", - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/tap/v4alpha:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/admin/v4alpha/certs.proto b/api/envoy/admin/v4alpha/certs.proto deleted file mode 100644 index 0dd868f71fa6a..0000000000000 --- a/api/envoy/admin/v4alpha/certs.proto +++ /dev/null @@ -1,86 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v4alpha; - -import "google/protobuf/timestamp.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v4alpha"; -option java_outer_classname = "CertsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Certificates] - -// Proto representation of certificate details. Admin endpoint uses this wrapper for `/certs` to -// display certificate information. See :ref:`/certs ` for more -// information. -message Certificates { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.Certificates"; - - // List of certificates known to an Envoy. - repeated Certificate certificates = 1; -} - -message Certificate { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.Certificate"; - - // Details of CA certificate. 
- repeated CertificateDetails ca_cert = 1; - - // Details of Certificate Chain - repeated CertificateDetails cert_chain = 2; -} - -// [#next-free-field: 8] -message CertificateDetails { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.CertificateDetails"; - - message OcspDetails { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v3.CertificateDetails.OcspDetails"; - - // Indicates the time from which the OCSP response is valid. - google.protobuf.Timestamp valid_from = 1; - - // Indicates the time at which the OCSP response expires. - google.protobuf.Timestamp expiration = 2; - } - - // Path of the certificate. - string path = 1; - - // Certificate Serial Number. - string serial_number = 2; - - // List of Subject Alternate names. - repeated SubjectAlternateName subject_alt_names = 3; - - // Minimum of days until expiration of certificate and it's chain. - uint64 days_until_expiration = 4; - - // Indicates the time from which the certificate is valid. - google.protobuf.Timestamp valid_from = 5; - - // Indicates the time at which the certificate expires. - google.protobuf.Timestamp expiration_time = 6; - - // Details related to the OCSP response associated with this certificate, if any. - OcspDetails ocsp_details = 7; -} - -message SubjectAlternateName { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v3.SubjectAlternateName"; - - // Subject Alternate Name. 
- oneof name { - string dns = 1; - - string uri = 2; - - string ip_address = 3; - } -} diff --git a/api/envoy/admin/v4alpha/clusters.proto b/api/envoy/admin/v4alpha/clusters.proto deleted file mode 100644 index 12969a28d0082..0000000000000 --- a/api/envoy/admin/v4alpha/clusters.proto +++ /dev/null @@ -1,176 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v4alpha; - -import "envoy/admin/v4alpha/metrics.proto"; -import "envoy/config/cluster/v4alpha/circuit_breaker.proto"; -import "envoy/config/core/v4alpha/address.proto"; -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/health_check.proto"; -import "envoy/type/v3/percent.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v4alpha"; -option java_outer_classname = "ClustersProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Clusters] - -// Admin endpoint uses this wrapper for `/clusters` to display cluster status information. -// See :ref:`/clusters ` for more information. -message Clusters { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.Clusters"; - - // Mapping from cluster name to each cluster's status. - repeated ClusterStatus cluster_statuses = 1; -} - -// Details an individual cluster's current status. -// [#next-free-field: 8] -message ClusterStatus { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ClusterStatus"; - - // Name of the cluster. - string name = 1; - - // Denotes whether this cluster was added via API or configured statically. - bool added_via_api = 2; - - // The success rate threshold used in the last interval. - // If - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *false*, all errors: externally and locally generated were used to calculate the threshold. 
- // If - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *true*, only externally generated errors were used to calculate the threshold. - // The threshold is used to eject hosts based on their success rate. See - // :ref:`Cluster outlier detection ` documentation for details. - // - // Note: this field may be omitted in any of the three following cases: - // - // 1. There were not enough hosts with enough request volume to proceed with success rate based - // outlier ejection. - // 2. The threshold is computed to be < 0 because a negative value implies that there was no - // threshold for that interval. - // 3. Outlier detection is not enabled for this cluster. - type.v3.Percent success_rate_ejection_threshold = 3; - - // Mapping from host address to the host's current status. - repeated HostStatus host_statuses = 4; - - // The success rate threshold used in the last interval when only locally originated failures were - // taken into account and externally originated errors were treated as success. - // This field should be interpreted only when - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *true*. The threshold is used to eject hosts based on their success rate. - // See :ref:`Cluster outlier detection ` documentation for - // details. - // - // Note: this field may be omitted in any of the three following cases: - // - // 1. There were not enough hosts with enough request volume to proceed with success rate based - // outlier ejection. - // 2. The threshold is computed to be < 0 because a negative value implies that there was no - // threshold for that interval. - // 3. Outlier detection is not enabled for this cluster. - type.v3.Percent local_origin_success_rate_ejection_threshold = 5; - - // :ref:`Circuit breaking ` settings of the cluster. - config.cluster.v4alpha.CircuitBreakers circuit_breakers = 6; - - // Observability name of the cluster. 
- string observability_name = 7; -} - -// Current state of a particular host. -// [#next-free-field: 10] -message HostStatus { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.HostStatus"; - - // Address of this host. - config.core.v4alpha.Address address = 1; - - // List of stats specific to this host. - repeated SimpleMetric stats = 2; - - // The host's current health status. - HostHealthStatus health_status = 3; - - // Request success rate for this host over the last calculated interval. - // If - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *false*, all errors: externally and locally generated were used in success rate - // calculation. If - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *true*, only externally generated errors were used in success rate calculation. - // See :ref:`Cluster outlier detection ` documentation for - // details. - // - // Note: the message will not be present if host did not have enough request volume to calculate - // success rate or the cluster did not have enough hosts to run through success rate outlier - // ejection. - type.v3.Percent success_rate = 4; - - // The host's weight. If not configured, the value defaults to 1. - uint32 weight = 5; - - // The hostname of the host, if applicable. - string hostname = 6; - - // The host's priority. If not configured, the value defaults to 0 (highest priority). - uint32 priority = 7; - - // Request success rate for this host over the last calculated - // interval when only locally originated errors are taken into account and externally originated - // errors were treated as success. - // This field should be interpreted only when - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *true*. - // See :ref:`Cluster outlier detection ` documentation for - // details. 
- // - // Note: the message will not be present if host did not have enough request volume to calculate - // success rate or the cluster did not have enough hosts to run through success rate outlier - // ejection. - type.v3.Percent local_origin_success_rate = 8; - - // locality of the host. - config.core.v4alpha.Locality locality = 9; -} - -// Health status for a host. -// [#next-free-field: 9] -message HostHealthStatus { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.HostHealthStatus"; - - // The host is currently failing active health checks. - bool failed_active_health_check = 1; - - // The host is currently considered an outlier and has been ejected. - bool failed_outlier_check = 2; - - // The host is currently being marked as degraded through active health checking. - bool failed_active_degraded_check = 4; - - // The host has been removed from service discovery, but is being stabilized due to active - // health checking. - bool pending_dynamic_removal = 5; - - // The host has not yet been health checked. - bool pending_active_hc = 6; - - // The host should be excluded from panic, spillover, etc. calculations because it was explicitly - // taken out of rotation via protocol signal and is not meant to be routed to. - bool excluded_via_immediate_hc_fail = 7; - - // The host failed active HC due to timeout. - bool active_hc_timeout = 8; - - // Health status as reported by EDS. Note: only HEALTHY and UNHEALTHY are currently supported - // here. - // [#comment:TODO(mrice32): pipe through remaining EDS health status possibilities.] 
- config.core.v4alpha.HealthStatus eds_health_status = 3; -} diff --git a/api/envoy/admin/v4alpha/config_dump.proto b/api/envoy/admin/v4alpha/config_dump.proto deleted file mode 100644 index 2e36bc16f9b60..0000000000000 --- a/api/envoy/admin/v4alpha/config_dump.proto +++ /dev/null @@ -1,484 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v4alpha; - -import "envoy/config/bootstrap/v4alpha/bootstrap.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/timestamp.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v4alpha"; -option java_outer_classname = "ConfigDumpProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: ConfigDump] - -// Resource status from the view of a xDS client, which tells the synchronization -// status between the xDS client and the xDS server. -enum ClientResourceStatus { - // Resource status is not available/unknown. - UNKNOWN = 0; - - // Client requested this resource but hasn't received any update from management - // server. The client will not fail requests, but will queue them until update - // arrives or the client times out waiting for the resource. - REQUESTED = 1; - - // This resource has been requested by the client but has either not been - // delivered by the server or was previously delivered by the server and then - // subsequently removed from resources provided by the server. For more - // information, please refer to the :ref:`"Knowing When a Requested Resource - // Does Not Exist" ` section. - DOES_NOT_EXIST = 2; - - // Client received this resource and replied with ACK. - ACKED = 3; - - // Client received this resource and replied with NACK. 
- NACKED = 4; -} - -// The :ref:`/config_dump ` admin endpoint uses this wrapper -// message to maintain and serve arbitrary configuration information from any component in Envoy. -message ConfigDump { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ConfigDump"; - - // This list is serialized and dumped in its entirety at the - // :ref:`/config_dump ` endpoint. - // - // The following configurations are currently supported and will be dumped in the order given - // below: - // - // * *bootstrap*: :ref:`BootstrapConfigDump ` - // * *clusters*: :ref:`ClustersConfigDump ` - // * *endpoints*: :ref:`EndpointsConfigDump ` - // * *listeners*: :ref:`ListenersConfigDump ` - // * *scoped_routes*: :ref:`ScopedRoutesConfigDump ` - // * *routes*: :ref:`RoutesConfigDump ` - // * *secrets*: :ref:`SecretsConfigDump ` - // - // EDS Configuration will only be dumped by using parameter `?include_eds` - // - // You can filter output with the resource and mask query parameters. - // See :ref:`/config_dump?resource={} `, - // :ref:`/config_dump?mask={} `, - // or :ref:`/config_dump?resource={},mask={} - // ` for more information. - repeated google.protobuf.Any configs = 1; -} - -message UpdateFailureState { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.UpdateFailureState"; - - // What the component configuration would have been if the update had succeeded. - // This field may not be populated by xDS clients due to storage overhead. - google.protobuf.Any failed_configuration = 1; - - // Time of the latest failed update attempt. - google.protobuf.Timestamp last_update_attempt = 2; - - // Details about the last failed update attempt. - string details = 3; - - // This is the version of the rejected resource. - // [#not-implemented-hide:] - string version_info = 4; -} - -// This message describes the bootstrap configuration that Envoy was started with. This includes -// any CLI overrides that were merged. 
Bootstrap configuration information can be used to recreate -// the static portions of an Envoy configuration by reusing the output as the bootstrap -// configuration for another Envoy. -message BootstrapConfigDump { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.BootstrapConfigDump"; - - config.bootstrap.v4alpha.Bootstrap bootstrap = 1; - - // The timestamp when the BootstrapConfig was last updated. - google.protobuf.Timestamp last_updated = 2; -} - -// Envoy's listener manager fills this message with all currently known listeners. Listener -// configuration information can be used to recreate an Envoy configuration by populating all -// listeners as static listeners or by returning them in a LDS response. -message ListenersConfigDump { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ListenersConfigDump"; - - // Describes a statically loaded listener. - message StaticListener { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v3.ListenersConfigDump.StaticListener"; - - // The listener config. - google.protobuf.Any listener = 1; - - // The timestamp when the Listener was last successfully updated. - google.protobuf.Timestamp last_updated = 2; - } - - message DynamicListenerState { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v3.ListenersConfigDump.DynamicListenerState"; - - // This is the per-resource version information. This version is currently taken from the - // :ref:`version_info ` field at the time - // that the listener was loaded. In the future, discrete per-listener versions may be supported - // by the API. - string version_info = 1; - - // The listener config. - google.protobuf.Any listener = 2; - - // The timestamp when the Listener was last successfully updated. - google.protobuf.Timestamp last_updated = 3; - } - - // Describes a dynamically loaded listener via the LDS API. 
- // [#next-free-field: 7] - message DynamicListener { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v3.ListenersConfigDump.DynamicListener"; - - // The name or unique id of this listener, pulled from the DynamicListenerState config. - string name = 1; - - // The listener state for any active listener by this name. - // These are listeners that are available to service data plane traffic. - DynamicListenerState active_state = 2; - - // The listener state for any warming listener by this name. - // These are listeners that are currently undergoing warming in preparation to service data - // plane traffic. Note that if attempting to recreate an Envoy configuration from a - // configuration dump, the warming listeners should generally be discarded. - DynamicListenerState warming_state = 3; - - // The listener state for any draining listener by this name. - // These are listeners that are currently undergoing draining in preparation to stop servicing - // data plane traffic. Note that if attempting to recreate an Envoy configuration from a - // configuration dump, the draining listeners should generally be discarded. - DynamicListenerState draining_state = 4; - - // Set if the last update failed, cleared after the next successful update. - // The *error_state* field contains the rejected version of this particular - // resource along with the reason and timestamp. For successfully updated or - // acknowledged resource, this field should be empty. - UpdateFailureState error_state = 5; - - // The client status of this resource. - // [#not-implemented-hide:] - ClientResourceStatus client_status = 6; - } - - // This is the :ref:`version_info ` in the - // last processed LDS discovery response. If there are only static bootstrap listeners, this field - // will be "". - string version_info = 1; - - // The statically loaded listener configs. 
- repeated StaticListener static_listeners = 2; - - // State for any warming, active, or draining listeners. - repeated DynamicListener dynamic_listeners = 3; -} - -// Envoy's cluster manager fills this message with all currently known clusters. Cluster -// configuration information can be used to recreate an Envoy configuration by populating all -// clusters as static clusters or by returning them in a CDS response. -message ClustersConfigDump { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ClustersConfigDump"; - - // Describes a statically loaded cluster. - message StaticCluster { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v3.ClustersConfigDump.StaticCluster"; - - // The cluster config. - google.protobuf.Any cluster = 1; - - // The timestamp when the Cluster was last updated. - google.protobuf.Timestamp last_updated = 2; - } - - // Describes a dynamically loaded cluster via the CDS API. - // [#next-free-field: 6] - message DynamicCluster { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v3.ClustersConfigDump.DynamicCluster"; - - // This is the per-resource version information. This version is currently taken from the - // :ref:`version_info ` field at the time - // that the cluster was loaded. In the future, discrete per-cluster versions may be supported by - // the API. - string version_info = 1; - - // The cluster config. - google.protobuf.Any cluster = 2; - - // The timestamp when the Cluster was last updated. - google.protobuf.Timestamp last_updated = 3; - - // Set if the last update failed, cleared after the next successful update. - // The *error_state* field contains the rejected version of this particular - // resource along with the reason and timestamp. For successfully updated or - // acknowledged resource, this field should be empty. - // [#not-implemented-hide:] - UpdateFailureState error_state = 4; - - // The client status of this resource. 
- // [#not-implemented-hide:] - ClientResourceStatus client_status = 5; - } - - // This is the :ref:`version_info ` in the - // last processed CDS discovery response. If there are only static bootstrap clusters, this field - // will be "". - string version_info = 1; - - // The statically loaded cluster configs. - repeated StaticCluster static_clusters = 2; - - // The dynamically loaded active clusters. These are clusters that are available to service - // data plane traffic. - repeated DynamicCluster dynamic_active_clusters = 3; - - // The dynamically loaded warming clusters. These are clusters that are currently undergoing - // warming in preparation to service data plane traffic. Note that if attempting to recreate an - // Envoy configuration from a configuration dump, the warming clusters should generally be - // discarded. - repeated DynamicCluster dynamic_warming_clusters = 4; -} - -// Envoy's RDS implementation fills this message with all currently loaded routes, as described by -// their RouteConfiguration objects. Static routes that are either defined in the bootstrap configuration -// or defined inline while configuring listeners are separated from those configured dynamically via RDS. -// Route configuration information can be used to recreate an Envoy configuration by populating all routes -// as static routes or by returning them in RDS responses. -message RoutesConfigDump { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.RoutesConfigDump"; - - message StaticRouteConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v3.RoutesConfigDump.StaticRouteConfig"; - - // The route config. - google.protobuf.Any route_config = 1; - - // The timestamp when the Route was last updated. 
- google.protobuf.Timestamp last_updated = 2; - } - - // [#next-free-field: 6] - message DynamicRouteConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig"; - - // This is the per-resource version information. This version is currently taken from the - // :ref:`version_info ` field at the time that - // the route configuration was loaded. - string version_info = 1; - - // The route config. - google.protobuf.Any route_config = 2; - - // The timestamp when the Route was last updated. - google.protobuf.Timestamp last_updated = 3; - - // Set if the last update failed, cleared after the next successful update. - // The *error_state* field contains the rejected version of this particular - // resource along with the reason and timestamp. For successfully updated or - // acknowledged resource, this field should be empty. - // [#not-implemented-hide:] - UpdateFailureState error_state = 4; - - // The client status of this resource. - // [#not-implemented-hide:] - ClientResourceStatus client_status = 5; - } - - // The statically loaded route configs. - repeated StaticRouteConfig static_route_configs = 2; - - // The dynamically loaded route configs. - repeated DynamicRouteConfig dynamic_route_configs = 3; -} - -// Envoy's scoped RDS implementation fills this message with all currently loaded route -// configuration scopes (defined via ScopedRouteConfigurationsSet protos). This message lists both -// the scopes defined inline with the higher order object (i.e., the HttpConnectionManager) and the -// dynamically obtained scopes via the SRDS API. 
-message ScopedRoutesConfigDump { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v3.ScopedRoutesConfigDump"; - - message InlineScopedRouteConfigs { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v3.ScopedRoutesConfigDump.InlineScopedRouteConfigs"; - - // The name assigned to the scoped route configurations. - string name = 1; - - // The scoped route configurations. - repeated google.protobuf.Any scoped_route_configs = 2; - - // The timestamp when the scoped route config set was last updated. - google.protobuf.Timestamp last_updated = 3; - } - - // [#next-free-field: 7] - message DynamicScopedRouteConfigs { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs"; - - // The name assigned to the scoped route configurations. - string name = 1; - - // This is the per-resource version information. This version is currently taken from the - // :ref:`version_info ` field at the time that - // the scoped routes configuration was loaded. - string version_info = 2; - - // The scoped route configurations. - repeated google.protobuf.Any scoped_route_configs = 3; - - // The timestamp when the scoped route config set was last updated. - google.protobuf.Timestamp last_updated = 4; - - // Set if the last update failed, cleared after the next successful update. - // The *error_state* field contains the rejected version of this particular - // resource along with the reason and timestamp. For successfully updated or - // acknowledged resource, this field should be empty. - // [#not-implemented-hide:] - UpdateFailureState error_state = 5; - - // The client status of this resource. - // [#not-implemented-hide:] - ClientResourceStatus client_status = 6; - } - - // The statically loaded scoped route configs. - repeated InlineScopedRouteConfigs inline_scoped_route_configs = 1; - - // The dynamically loaded scoped route configs. 
- repeated DynamicScopedRouteConfigs dynamic_scoped_route_configs = 2; -} - -// Envoys SDS implementation fills this message with all secrets fetched dynamically via SDS. -message SecretsConfigDump { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.SecretsConfigDump"; - - // DynamicSecret contains secret information fetched via SDS. - // [#next-free-field: 7] - message DynamicSecret { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v3.SecretsConfigDump.DynamicSecret"; - - // The name assigned to the secret. - string name = 1; - - // This is the per-resource version information. - string version_info = 2; - - // The timestamp when the secret was last updated. - google.protobuf.Timestamp last_updated = 3; - - // The actual secret information. - // Security sensitive information is redacted (replaced with "[redacted]") for - // private keys and passwords in TLS certificates. - google.protobuf.Any secret = 4; - - // Set if the last update failed, cleared after the next successful update. - // The *error_state* field contains the rejected version of this particular - // resource along with the reason and timestamp. For successfully updated or - // acknowledged resource, this field should be empty. - // [#not-implemented-hide:] - UpdateFailureState error_state = 5; - - // The client status of this resource. - // [#not-implemented-hide:] - ClientResourceStatus client_status = 6; - } - - // StaticSecret specifies statically loaded secret in bootstrap. - message StaticSecret { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v3.SecretsConfigDump.StaticSecret"; - - // The name assigned to the secret. - string name = 1; - - // The timestamp when the secret was last updated. - google.protobuf.Timestamp last_updated = 2; - - // The actual secret information. - // Security sensitive information is redacted (replaced with "[redacted]") for - // private keys and passwords in TLS certificates. 
- google.protobuf.Any secret = 3; - } - - // The statically loaded secrets. - repeated StaticSecret static_secrets = 1; - - // The dynamically loaded active secrets. These are secrets that are available to service - // clusters or listeners. - repeated DynamicSecret dynamic_active_secrets = 2; - - // The dynamically loaded warming secrets. These are secrets that are currently undergoing - // warming in preparation to service clusters or listeners. - repeated DynamicSecret dynamic_warming_secrets = 3; -} - -// Envoy's admin fill this message with all currently known endpoints. Endpoint -// configuration information can be used to recreate an Envoy configuration by populating all -// endpoints as static endpoints or by returning them in an EDS response. -message EndpointsConfigDump { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.EndpointsConfigDump"; - - message StaticEndpointConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v3.EndpointsConfigDump.StaticEndpointConfig"; - - // The endpoint config. - google.protobuf.Any endpoint_config = 1; - - // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. - google.protobuf.Timestamp last_updated = 2; - } - - // [#next-free-field: 6] - message DynamicEndpointConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig"; - - // [#not-implemented-hide:] This is the per-resource version information. This version is currently taken from the - // :ref:`version_info ` field at the time that - // the endpoint configuration was loaded. - string version_info = 1; - - // The endpoint config. - google.protobuf.Any endpoint_config = 2; - - // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. - google.protobuf.Timestamp last_updated = 3; - - // Set if the last update failed, cleared after the next successful update. 
- // The *error_state* field contains the rejected version of this particular - // resource along with the reason and timestamp. For successfully updated or - // acknowledged resource, this field should be empty. - // [#not-implemented-hide:] - UpdateFailureState error_state = 4; - - // The client status of this resource. - // [#not-implemented-hide:] - ClientResourceStatus client_status = 5; - } - - // The statically loaded endpoint configs. - repeated StaticEndpointConfig static_endpoint_configs = 2; - - // The dynamically loaded endpoint configs. - repeated DynamicEndpointConfig dynamic_endpoint_configs = 3; -} diff --git a/api/envoy/admin/v4alpha/init_dump.proto b/api/envoy/admin/v4alpha/init_dump.proto deleted file mode 100644 index 81c423e52024d..0000000000000 --- a/api/envoy/admin/v4alpha/init_dump.proto +++ /dev/null @@ -1,37 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v4alpha; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v4alpha"; -option java_outer_classname = "InitDumpProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: InitDump] - -// Dumps of unready targets of envoy init managers. Envoy's admin fills this message with init managers, -// which provides the information of their unready targets. -// The :ref:`/init_dump ` will dump all unready targets information. -message UnreadyTargetsDumps { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.UnreadyTargetsDumps"; - - // Message of unready targets information of an init manager. - message UnreadyTargetsDump { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v3.UnreadyTargetsDumps.UnreadyTargetsDump"; - - // Name of the init manager. Example: "init_manager_xxx". - string name = 1; - - // Names of unready targets of the init manager. 
Example: "target_xxx". - repeated string target_names = 2; - } - - // You can choose specific component to dump unready targets with mask query parameter. - // See :ref:`/init_dump?mask={} ` for more information. - // The dumps of unready targets of all init managers. - repeated UnreadyTargetsDump unready_targets_dumps = 1; -} diff --git a/api/envoy/admin/v4alpha/listeners.proto b/api/envoy/admin/v4alpha/listeners.proto deleted file mode 100644 index 89bdc4c5bbf8d..0000000000000 --- a/api/envoy/admin/v4alpha/listeners.proto +++ /dev/null @@ -1,36 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v4alpha; - -import "envoy/config/core/v4alpha/address.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v4alpha"; -option java_outer_classname = "ListenersProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Listeners] - -// Admin endpoint uses this wrapper for `/listeners` to display listener status information. -// See :ref:`/listeners ` for more information. -message Listeners { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.Listeners"; - - // List of listener statuses. - repeated ListenerStatus listener_statuses = 1; -} - -// Details an individual listener's current status. -message ListenerStatus { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ListenerStatus"; - - // Name of the listener - string name = 1; - - // The actual local address that the listener is listening on. If a listener was configured - // to listen on port 0, then this address has the port that was allocated by the OS. 
- config.core.v4alpha.Address local_address = 2; -} diff --git a/api/envoy/admin/v4alpha/memory.proto b/api/envoy/admin/v4alpha/memory.proto deleted file mode 100644 index d2f0b57229ce8..0000000000000 --- a/api/envoy/admin/v4alpha/memory.proto +++ /dev/null @@ -1,47 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v4alpha; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v4alpha"; -option java_outer_classname = "MemoryProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Memory] - -// Proto representation of the internal memory consumption of an Envoy instance. These represent -// values extracted from an internal TCMalloc instance. For more information, see the section of the -// docs entitled ["Generic Tcmalloc Status"](https://gperftools.github.io/gperftools/tcmalloc.html). -// [#next-free-field: 7] -message Memory { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.Memory"; - - // The number of bytes allocated by the heap for Envoy. This is an alias for - // `generic.current_allocated_bytes`. - uint64 allocated = 1; - - // The number of bytes reserved by the heap but not necessarily allocated. This is an alias for - // `generic.heap_size`. - uint64 heap_size = 2; - - // The number of bytes in free, unmapped pages in the page heap. These bytes always count towards - // virtual memory usage, and depending on the OS, typically do not count towards physical memory - // usage. This is an alias for `tcmalloc.pageheap_unmapped_bytes`. - uint64 pageheap_unmapped = 3; - - // The number of bytes in free, mapped pages in the page heap. These bytes always count towards - // virtual memory usage, and unless the underlying memory is swapped out by the OS, they also - // count towards physical memory usage. 
This is an alias for `tcmalloc.pageheap_free_bytes`. - uint64 pageheap_free = 4; - - // The amount of memory used by the TCMalloc thread caches (for small objects). This is an alias - // for `tcmalloc.current_total_thread_cache_bytes`. - uint64 total_thread_cache = 5; - - // The number of bytes of the physical memory usage by the allocator. This is an alias for - // `generic.total_physical_bytes`. - uint64 total_physical_bytes = 6; -} diff --git a/api/envoy/admin/v4alpha/metrics.proto b/api/envoy/admin/v4alpha/metrics.proto deleted file mode 100644 index 78613320038b7..0000000000000 --- a/api/envoy/admin/v4alpha/metrics.proto +++ /dev/null @@ -1,32 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v4alpha; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v4alpha"; -option java_outer_classname = "MetricsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Metrics] - -// Proto representation of an Envoy Counter or Gauge value. -message SimpleMetric { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.SimpleMetric"; - - enum Type { - COUNTER = 0; - GAUGE = 1; - } - - // Type of the metric represented. - Type type = 1; - - // Current metric value. - uint64 value = 2; - - // Name of the metric. 
- string name = 3; -} diff --git a/api/envoy/admin/v4alpha/mutex_stats.proto b/api/envoy/admin/v4alpha/mutex_stats.proto deleted file mode 100644 index 6f9fcd548cc04..0000000000000 --- a/api/envoy/admin/v4alpha/mutex_stats.proto +++ /dev/null @@ -1,33 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v4alpha; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v4alpha"; -option java_outer_classname = "MutexStatsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: MutexStats] - -// Proto representation of the statistics collected upon absl::Mutex contention, if Envoy is run -// under :option:`--enable-mutex-tracing`. For more information, see the `absl::Mutex` -// [docs](https://abseil.io/about/design/mutex#extra-features). -// -// *NB*: The wait cycles below are measured by `absl::base_internal::CycleClock`, and may not -// correspond to core clock frequency. For more information, see the `CycleClock` -// [docs](https://github.com/abseil/abseil-cpp/blob/master/absl/base/internal/cycleclock.h). -message MutexStats { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.MutexStats"; - - // The number of individual mutex contentions which have occurred since startup. - uint64 num_contentions = 1; - - // The length of the current contention wait cycle. - uint64 current_wait_cycles = 2; - - // The lifetime total of all contention wait cycles. 
- uint64 lifetime_wait_cycles = 3; -} diff --git a/api/envoy/admin/v4alpha/server_info.proto b/api/envoy/admin/v4alpha/server_info.proto deleted file mode 100644 index 18e59c92b0eff..0000000000000 --- a/api/envoy/admin/v4alpha/server_info.proto +++ /dev/null @@ -1,194 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v4alpha"; -option java_outer_classname = "ServerInfoProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Server State] - -// Proto representation of the value returned by /server_info, containing -// server version/server status information. -// [#next-free-field: 8] -message ServerInfo { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ServerInfo"; - - enum State { - // Server is live and serving traffic. - LIVE = 0; - - // Server is draining listeners in response to external health checks failing. - DRAINING = 1; - - // Server has not yet completed cluster manager initialization. - PRE_INITIALIZING = 2; - - // Server is running the cluster manager initialization callbacks (e.g., RDS). - INITIALIZING = 3; - } - - // Server version. - string version = 1; - - // State of the server. - State state = 2; - - // Uptime since current epoch was started. - google.protobuf.Duration uptime_current_epoch = 3; - - // Uptime since the start of the first epoch. - google.protobuf.Duration uptime_all_epochs = 4; - - // Hot restart version. - string hot_restart_version = 5; - - // Command line options the server is currently running with. - CommandLineOptions command_line_options = 6; - - // Populated node identity of this server. 
- config.core.v4alpha.Node node = 7; -} - -// [#next-free-field: 38] -message CommandLineOptions { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.CommandLineOptions"; - - enum IpVersion { - v4 = 0; - v6 = 1; - } - - enum Mode { - // Validate configs and then serve traffic normally. - Serve = 0; - - // Validate configs and exit. - Validate = 1; - - // Completely load and initialize the config, and then exit without running the listener loop. - InitOnly = 2; - } - - enum DrainStrategy { - // Gradually discourage connections over the course of the drain period. - Gradual = 0; - - // Discourage all connections for the duration of the drain sequence. - Immediate = 1; - } - - reserved 12, 20, 21; - - reserved "max_stats", "max_obj_name_len"; - - // See :option:`--base-id` for details. - uint64 base_id = 1; - - // See :option:`--use-dynamic-base-id` for details. - bool use_dynamic_base_id = 31; - - // See :option:`--base-id-path` for details. - string base_id_path = 32; - - // See :option:`--concurrency` for details. - uint32 concurrency = 2; - - // See :option:`--config-path` for details. - string config_path = 3; - - // See :option:`--config-yaml` for details. - string config_yaml = 4; - - // See :option:`--allow-unknown-static-fields` for details. - bool allow_unknown_static_fields = 5; - - // See :option:`--reject-unknown-dynamic-fields` for details. - bool reject_unknown_dynamic_fields = 26; - - // See :option:`--ignore-unknown-dynamic-fields` for details. - bool ignore_unknown_dynamic_fields = 30; - - // See :option:`--admin-address-path` for details. - string admin_address_path = 6; - - // See :option:`--local-address-ip-version` for details. - IpVersion local_address_ip_version = 7; - - // See :option:`--log-level` for details. - string log_level = 8; - - // See :option:`--component-log-level` for details. - string component_log_level = 9; - - // See :option:`--log-format` for details. 
- string log_format = 10; - - // See :option:`--log-format-escaped` for details. - bool log_format_escaped = 27; - - // See :option:`--log-path` for details. - string log_path = 11; - - // See :option:`--service-cluster` for details. - string service_cluster = 13; - - // See :option:`--service-node` for details. - string service_node = 14; - - // See :option:`--service-zone` for details. - string service_zone = 15; - - // See :option:`--file-flush-interval-msec` for details. - google.protobuf.Duration file_flush_interval = 16; - - // See :option:`--drain-time-s` for details. - google.protobuf.Duration drain_time = 17; - - // See :option:`--drain-strategy` for details. - DrainStrategy drain_strategy = 33; - - // See :option:`--parent-shutdown-time-s` for details. - google.protobuf.Duration parent_shutdown_time = 18; - - // See :option:`--mode` for details. - Mode mode = 19; - - // See :option:`--disable-hot-restart` for details. - bool disable_hot_restart = 22; - - // See :option:`--enable-mutex-tracing` for details. - bool enable_mutex_tracing = 23; - - // See :option:`--restart-epoch` for details. - uint32 restart_epoch = 24; - - // See :option:`--cpuset-threads` for details. - bool cpuset_threads = 25; - - // See :option:`--disable-extensions` for details. - repeated string disabled_extensions = 28; - - // See :option:`--bootstrap-version` for details. - uint32 bootstrap_version = 29; - - // See :option:`--enable-fine-grain-logging` for details. - bool enable_fine_grain_logging = 34; - - // See :option:`--socket-path` for details. - string socket_path = 35; - - // See :option:`--socket-mode` for details. - uint32 socket_mode = 36; - - // See :option:`--enable-core-dump` for details. 
- bool enable_core_dump = 37; -} diff --git a/api/envoy/admin/v4alpha/tap.proto b/api/envoy/admin/v4alpha/tap.proto deleted file mode 100644 index e892593804188..0000000000000 --- a/api/envoy/admin/v4alpha/tap.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v4alpha; - -import "envoy/config/tap/v4alpha/common.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v4alpha"; -option java_outer_classname = "TapProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Tap] - -// The /tap admin request body that is used to configure an active tap session. -message TapRequest { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.TapRequest"; - - // The opaque configuration ID used to match the configuration to a loaded extension. - // A tap extension configures a similar opaque ID that is used to match. - string config_id = 1 [(validate.rules).string = {min_len: 1}]; - - // The tap configuration to load. - config.tap.v4alpha.TapConfig tap_config = 2 [(validate.rules).message = {required: true}]; -} diff --git a/api/envoy/config/accesslog/v4alpha/BUILD b/api/envoy/config/accesslog/v4alpha/BUILD deleted file mode 100644 index 68064d3b08d1e..0000000000000 --- a/api/envoy/config/accesslog/v4alpha/BUILD +++ /dev/null @@ -1,16 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/accesslog/v3:pkg", - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/route/v4alpha:pkg", - "//envoy/type/matcher/v4alpha:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/config/accesslog/v4alpha/accesslog.proto b/api/envoy/config/accesslog/v4alpha/accesslog.proto deleted file mode 100644 index 3e0c7f53598cc..0000000000000 --- a/api/envoy/config/accesslog/v4alpha/accesslog.proto +++ /dev/null @@ -1,326 +0,0 @@ -syntax = "proto3"; - -package envoy.config.accesslog.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/route/v4alpha/route_components.proto"; -import "envoy/type/matcher/v4alpha/metadata.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.accesslog.v4alpha"; -option java_outer_classname = "AccesslogProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Common access log types] - -message AccessLog { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v3.AccessLog"; - - reserved 3; - - reserved "config"; - - // The name of the access log extension to instantiate. - // The name must match one of the compiled in loggers. - // See the :ref:`extensions listed in typed_config below ` for the default list of available loggers. - string name = 1; - - // Filter which is used to determine if the access log needs to be written. 
- AccessLogFilter filter = 2; - - // Custom configuration that must be set according to the access logger extension being instantiated. - // [#extension-category: envoy.access_loggers] - oneof config_type { - google.protobuf.Any typed_config = 4; - } -} - -// [#next-free-field: 13] -message AccessLogFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v3.AccessLogFilter"; - - oneof filter_specifier { - option (validate.required) = true; - - // Status code filter. - StatusCodeFilter status_code_filter = 1; - - // Duration filter. - DurationFilter duration_filter = 2; - - // Not health check filter. - NotHealthCheckFilter not_health_check_filter = 3; - - // Traceable filter. - TraceableFilter traceable_filter = 4; - - // Runtime filter. - RuntimeFilter runtime_filter = 5; - - // And filter. - AndFilter and_filter = 6; - - // Or filter. - OrFilter or_filter = 7; - - // Header filter. - HeaderFilter header_filter = 8; - - // Response flag filter. - ResponseFlagFilter response_flag_filter = 9; - - // gRPC status filter. - GrpcStatusFilter grpc_status_filter = 10; - - // Extension filter. - ExtensionFilter extension_filter = 11; - - // Metadata Filter - MetadataFilter metadata_filter = 12; - } -} - -// Filter on an integer comparison. -message ComparisonFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v3.ComparisonFilter"; - - enum Op { - // = - EQ = 0; - - // >= - GE = 1; - - // <= - LE = 2; - } - - // Comparison operator. - Op op = 1 [(validate.rules).enum = {defined_only: true}]; - - // Value to compare against. - core.v4alpha.RuntimeUInt32 value = 2; -} - -// Filters on HTTP response/status code. -message StatusCodeFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v3.StatusCodeFilter"; - - // Comparison. 
- ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}]; -} - -// Filters on total request duration in milliseconds. -message DurationFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v3.DurationFilter"; - - // Comparison. - ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}]; -} - -// Filters for requests that are not health check requests. A health check -// request is marked by the health check filter. -message NotHealthCheckFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v3.NotHealthCheckFilter"; -} - -// Filters for requests that are traceable. See the tracing overview for more -// information on how a request becomes traceable. -message TraceableFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v3.TraceableFilter"; -} - -// Filters for random sampling of requests. -message RuntimeFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v3.RuntimeFilter"; - - // Runtime key to get an optional overridden numerator for use in the - // *percent_sampled* field. If found in runtime, this value will replace the - // default numerator. - string runtime_key = 1 [(validate.rules).string = {min_len: 1}]; - - // The default sampling percentage. If not specified, defaults to 0% with - // denominator of 100. - type.v3.FractionalPercent percent_sampled = 2; - - // By default, sampling pivots on the header - // :ref:`x-request-id` being - // present. If :ref:`x-request-id` - // is present, the filter will consistently sample across multiple hosts based - // on the runtime key value and the value extracted from - // :ref:`x-request-id`. If it is - // missing, or *use_independent_randomness* is set to true, the filter will - // randomly sample based on the runtime key value alone. 
- // *use_independent_randomness* can be used for logging kill switches within - // complex nested :ref:`AndFilter - // ` and :ref:`OrFilter - // ` blocks that are easier to - // reason about from a probability perspective (i.e., setting to true will - // cause the filter to behave like an independent random variable when - // composed within logical operator filters). - bool use_independent_randomness = 3; -} - -// Performs a logical “and” operation on the result of each filter in filters. -// Filters are evaluated sequentially and if one of them returns false, the -// filter returns false immediately. -message AndFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v3.AndFilter"; - - repeated AccessLogFilter filters = 1 [(validate.rules).repeated = {min_items: 2}]; -} - -// Performs a logical “or” operation on the result of each individual filter. -// Filters are evaluated sequentially and if one of them returns true, the -// filter returns true immediately. -message OrFilter { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v3.OrFilter"; - - repeated AccessLogFilter filters = 2 [(validate.rules).repeated = {min_items: 2}]; -} - -// Filters requests based on the presence or value of a request header. -message HeaderFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v3.HeaderFilter"; - - // Only requests with a header which matches the specified HeaderMatcher will - // pass the filter check. - route.v4alpha.HeaderMatcher header = 1 [(validate.rules).message = {required: true}]; -} - -// Filters requests that received responses with an Envoy response flag set. -// A list of the response flags can be found -// in the access log formatter -// :ref:`documentation`. 
-message ResponseFlagFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v3.ResponseFlagFilter"; - - // Only responses with the any of the flags listed in this field will be - // logged. This field is optional. If it is not specified, then any response - // flag will pass the filter check. - repeated string flags = 1 [(validate.rules).repeated = { - items { - string { - in: "LH" - in: "UH" - in: "UT" - in: "LR" - in: "UR" - in: "UF" - in: "UC" - in: "UO" - in: "NR" - in: "DI" - in: "FI" - in: "RL" - in: "UAEX" - in: "RLSE" - in: "DC" - in: "URX" - in: "SI" - in: "IH" - in: "DPE" - in: "UMSDR" - in: "RFCF" - in: "NFCF" - in: "DT" - in: "UPE" - in: "NC" - in: "OM" - } - } - }]; -} - -// Filters gRPC requests based on their response status. If a gRPC status is not -// provided, the filter will infer the status from the HTTP status code. -message GrpcStatusFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v3.GrpcStatusFilter"; - - enum Status { - OK = 0; - CANCELED = 1; - UNKNOWN = 2; - INVALID_ARGUMENT = 3; - DEADLINE_EXCEEDED = 4; - NOT_FOUND = 5; - ALREADY_EXISTS = 6; - PERMISSION_DENIED = 7; - RESOURCE_EXHAUSTED = 8; - FAILED_PRECONDITION = 9; - ABORTED = 10; - OUT_OF_RANGE = 11; - UNIMPLEMENTED = 12; - INTERNAL = 13; - UNAVAILABLE = 14; - DATA_LOSS = 15; - UNAUTHENTICATED = 16; - } - - // Logs only responses that have any one of the gRPC statuses in this field. - repeated Status statuses = 1 [(validate.rules).repeated = {items {enum {defined_only: true}}}]; - - // If included and set to true, the filter will instead block all responses - // with a gRPC status or inferred gRPC status enumerated in statuses, and - // allow all other responses. - bool exclude = 2; -} - -// Filters based on matching dynamic metadata. 
-// If the matcher path and key correspond to an existing key in dynamic -// metadata, the request is logged only if the matcher value is equal to the -// metadata value. If the matcher path and key *do not* correspond to an -// existing key in dynamic metadata, the request is logged only if -// match_if_key_not_found is "true" or unset. -message MetadataFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v3.MetadataFilter"; - - // Matcher to check metadata for specified value. For example, to match on the - // access_log_hint metadata, set the filter to "envoy.common" and the path to - // "access_log_hint", and the value to "true". - type.matcher.v4alpha.MetadataMatcher matcher = 1; - - // Default result if the key does not exist in dynamic metadata: if unset or - // true, then log; if false, then don't log. - google.protobuf.BoolValue match_if_key_not_found = 2; -} - -// Extension filter is statically registered at runtime. -message ExtensionFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v3.ExtensionFilter"; - - reserved 2; - - reserved "config"; - - // The name of the filter implementation to instantiate. The name must - // match a statically registered filter. - string name = 1; - - // Custom configuration that depends on the filter being instantiated. - oneof config_type { - google.protobuf.Any typed_config = 3; - } -} diff --git a/api/envoy/config/bootstrap/v4alpha/BUILD b/api/envoy/config/bootstrap/v4alpha/BUILD deleted file mode 100644 index 5dc1c5c61f7db..0000000000000 --- a/api/envoy/config/bootstrap/v4alpha/BUILD +++ /dev/null @@ -1,20 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/accesslog/v4alpha:pkg", - "//envoy/config/bootstrap/v3:pkg", - "//envoy/config/cluster/v4alpha:pkg", - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/listener/v4alpha:pkg", - "//envoy/config/metrics/v4alpha:pkg", - "//envoy/config/overload/v3:pkg", - "//envoy/extensions/transport_sockets/tls/v4alpha:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/config/bootstrap/v4alpha/bootstrap.proto b/api/envoy/config/bootstrap/v4alpha/bootstrap.proto deleted file mode 100644 index 5c45b8f7dbce9..0000000000000 --- a/api/envoy/config/bootstrap/v4alpha/bootstrap.proto +++ /dev/null @@ -1,620 +0,0 @@ -syntax = "proto3"; - -package envoy.config.bootstrap.v4alpha; - -import "envoy/config/accesslog/v4alpha/accesslog.proto"; -import "envoy/config/cluster/v4alpha/cluster.proto"; -import "envoy/config/core/v4alpha/address.proto"; -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/config_source.proto"; -import "envoy/config/core/v4alpha/event_service_config.proto"; -import "envoy/config/core/v4alpha/extension.proto"; -import "envoy/config/core/v4alpha/resolver.proto"; -import "envoy/config/core/v4alpha/socket_option.proto"; -import "envoy/config/listener/v4alpha/listener.proto"; -import "envoy/config/metrics/v4alpha/stats.proto"; -import "envoy/config/overload/v3/overload.proto"; -import "envoy/extensions/transport_sockets/tls/v4alpha/secret.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/security.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = 
"io.envoyproxy.envoy.config.bootstrap.v4alpha"; -option java_outer_classname = "BootstrapProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Bootstrap] -// This proto is supplied via the :option:`-c` CLI flag and acts as the root -// of the Envoy v3 configuration. See the :ref:`v3 configuration overview -// ` for more detail. - -// Bootstrap :ref:`configuration overview `. -// [#next-free-field: 33] -message Bootstrap { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v3.Bootstrap"; - - message StaticResources { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v3.Bootstrap.StaticResources"; - - // Static :ref:`Listeners `. These listeners are - // available regardless of LDS configuration. - repeated listener.v4alpha.Listener listeners = 1; - - // If a network based configuration source is specified for :ref:`cds_config - // `, it's necessary - // to have some initial cluster definitions available to allow Envoy to know - // how to speak to the management server. These cluster definitions may not - // use :ref:`EDS ` (i.e. they should be static - // IP or DNS-based). - repeated cluster.v4alpha.Cluster clusters = 2; - - // These static secrets can be used by :ref:`SdsSecretConfig - // ` - repeated envoy.extensions.transport_sockets.tls.v4alpha.Secret secrets = 3; - } - - // [#next-free-field: 7] - message DynamicResources { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v3.Bootstrap.DynamicResources"; - - reserved 4; - - // All :ref:`Listeners ` are provided by a single - // :ref:`LDS ` configuration source. - core.v4alpha.ConfigSource lds_config = 1; - - // xdstp:// resource locator for listener collection. 
- // [#not-implemented-hide:] - string lds_resources_locator = 5; - - // All post-bootstrap :ref:`Cluster ` definitions are - // provided by a single :ref:`CDS ` - // configuration source. - core.v4alpha.ConfigSource cds_config = 2; - - // xdstp:// resource locator for cluster collection. - // [#not-implemented-hide:] - string cds_resources_locator = 6; - - // A single :ref:`ADS ` source may be optionally - // specified. This must have :ref:`api_type - // ` :ref:`GRPC - // `. Only - // :ref:`ConfigSources ` that have - // the :ref:`ads ` field set will be - // streamed on the ADS channel. - core.v4alpha.ApiConfigSource ads_config = 3; - } - - reserved 10, 11, 8, 9, 20; - - reserved "runtime", "watchdog", "tracing", "use_tcp_for_dns_lookups"; - - // Node identity to present to the management server and for instance - // identification purposes (e.g. in generated headers). - core.v4alpha.Node node = 1; - - // A list of :ref:`Node ` field names - // that will be included in the context parameters of the effective - // xdstp:// URL that is sent in a discovery request when resource - // locators are used for LDS/CDS. Any non-string field will have its JSON - // encoding set as the context parameter value, with the exception of - // metadata, which will be flattened (see example below). The supported field - // names are: - // - "cluster" - // - "id" - // - "locality.region" - // - "locality.sub_zone" - // - "locality.zone" - // - "metadata" - // - "user_agent_build_version.metadata" - // - "user_agent_build_version.version" - // - "user_agent_name" - // - "user_agent_version" - // - // The node context parameters act as a base layer dictionary for the context - // parameters (i.e. more specific resource specific context parameters will - // override). Field names will be prefixed with “udpa.node.” when included in - // context parameters. 
- // - // For example, if node_context_params is ``["user_agent_name", "metadata"]``, - // the implied context parameters might be:: - // - // node.user_agent_name: "envoy" - // node.metadata.foo: "{\"bar\": \"baz\"}" - // node.metadata.some: "42" - // node.metadata.thing: "\"thing\"" - // - // [#not-implemented-hide:] - repeated string node_context_params = 26; - - // Statically specified resources. - StaticResources static_resources = 2; - - // xDS configuration sources. - DynamicResources dynamic_resources = 3; - - // Configuration for the cluster manager which owns all upstream clusters - // within the server. - ClusterManager cluster_manager = 4; - - // Health discovery service config option. - // (:ref:`core.ApiConfigSource `) - core.v4alpha.ApiConfigSource hds_config = 14; - - // Optional file system path to search for startup flag files. - string flags_path = 5; - - // Optional set of stats sinks. - repeated metrics.v4alpha.StatsSink stats_sinks = 6; - - // Configuration for internal processing of stats. - metrics.v4alpha.StatsConfig stats_config = 13; - - oneof stats_flush { - // Optional duration between flushes to configured stats sinks. For - // performance reasons Envoy latches counters and only flushes counters and - // gauges at a periodic interval. If not specified the default is 5000ms (5 - // seconds). Only one of `stats_flush_interval` or `stats_flush_on_admin` - // can be set. - // Duration must be at least 1ms and at most 5 min. - google.protobuf.Duration stats_flush_interval = 7 [(validate.rules).duration = { - lt {seconds: 300} - gte {nanos: 1000000} - }]; - - // Flush stats to sinks only when queried for on the admin interface. If set, - // a flush timer is not created. Only one of `stats_flush_on_admin` or - // `stats_flush_interval` can be set. - bool stats_flush_on_admin = 29 [(validate.rules).bool = {const: true}]; - } - - // Optional watchdogs configuration. 
- // This is used for specifying different watchdogs for the different subsystems. - // [#extension-category: envoy.guarddog_actions] - Watchdogs watchdogs = 27; - - // Configuration for the runtime configuration provider. If not - // specified, a “null” provider will be used which will result in all defaults - // being used. - LayeredRuntime layered_runtime = 17; - - // Configuration for the local administration HTTP server. - Admin admin = 12; - - // Optional overload manager configuration. - overload.v3.OverloadManager overload_manager = 15 [ - (udpa.annotations.security).configure_for_untrusted_downstream = true, - (udpa.annotations.security).configure_for_untrusted_upstream = true - ]; - - // Enable :ref:`stats for event dispatcher `, defaults to false. - // Note that this records a value for each iteration of the event loop on every thread. This - // should normally be minimal overhead, but when using - // :ref:`statsd `, it will send each observed value - // over the wire individually because the statsd protocol doesn't have any way to represent a - // histogram summary. Be aware that this can be a very large volume of data. - bool enable_dispatcher_stats = 16; - - // Optional string which will be used in lieu of x-envoy in prefixing headers. - // - // For example, if this string is present and set to X-Foo, then x-envoy-retry-on will be - // transformed into x-foo-retry-on etc. - // - // Note this applies to the headers Envoy will generate, the headers Envoy will sanitize, and the - // headers Envoy will trust for core code and core extensions only. Be VERY careful making - // changes to this string, especially in multi-layer Envoy deployments or deployments using - // extensions which are not upstream. - string header_prefix = 18; - - // Optional proxy version which will be used to set the value of :ref:`server.version statistic - // ` if specified. Envoy will not process this value, it will be sent as is to - // :ref:`stats sinks `. 
- google.protobuf.UInt64Value stats_server_version_override = 19; - - // DNS resolution configuration which includes the underlying dns resolver addresses and options. - // This may be overridden on a per-cluster basis in cds_config, when - // :ref:`dns_resolution_config ` - // is specified. - // *dns_resolution_config* will be deprecated once - // :ref:'typed_dns_resolver_config ' - // is fully supported. - core.v4alpha.DnsResolutionConfig dns_resolution_config = 30; - - // DNS resolver type configuration extension. This extension can be used to configure c-ares, apple, - // or any other DNS resolver types and the related parameters. - // For example, an object of :ref:`DnsResolutionConfig ` - // can be packed into this *typed_dns_resolver_config*. This configuration will replace the - // :ref:'dns_resolution_config ' - // configuration eventually. - // TODO(yanjunxiang): Investigate the deprecation plan for *dns_resolution_config*. - // During the transition period when both *dns_resolution_config* and *typed_dns_resolver_config* exists, - // this configuration is optional. - // When *typed_dns_resolver_config* is in place, Envoy will use it and ignore *dns_resolution_config*. - // When *typed_dns_resolver_config* is missing, the default behavior is in place. - // [#not-implemented-hide:] - core.v4alpha.TypedExtensionConfig typed_dns_resolver_config = 31; - - // Specifies optional bootstrap extensions to be instantiated at startup time. - // Each item contains extension specific configuration. - // [#extension-category: envoy.bootstrap] - repeated core.v4alpha.TypedExtensionConfig bootstrap_extensions = 21; - - // Specifies optional extensions instantiated at startup time and - // invoked during crash time on the request that caused the crash. - repeated FatalAction fatal_actions = 28; - - // Configuration sources that will participate in - // xdstp:// URL authority resolution. The algorithm is as - // follows: - // 1. 
The authority field is taken from the xdstp:// URL, call - // this *resource_authority*. - // 2. *resource_authority* is compared against the authorities in any peer - // *ConfigSource*. The peer *ConfigSource* is the configuration source - // message which would have been used unconditionally for resolution - // with opaque resource names. If there is a match with an authority, the - // peer *ConfigSource* message is used. - // 3. *resource_authority* is compared sequentially with the authorities in - // each configuration source in *config_sources*. The first *ConfigSource* - // to match wins. - // 4. As a fallback, if no configuration source matches, then - // *default_config_source* is used. - // 5. If *default_config_source* is not specified, resolution fails. - // [#not-implemented-hide:] - repeated core.v4alpha.ConfigSource config_sources = 22; - - // Default configuration source for xdstp:// URLs if all - // other resolution fails. - // [#not-implemented-hide:] - core.v4alpha.ConfigSource default_config_source = 23; - - // Optional overriding of default socket interface. The value must be the name of one of the - // socket interface factories initialized through a bootstrap extension - string default_socket_interface = 24; - - // Global map of CertificateProvider instances. These instances are referred to by name in the - // :ref:`CommonTlsContext.CertificateProviderInstance.instance_name - // ` - // field. - // [#not-implemented-hide:] - map certificate_provider_instances = 25; - - // Specifies a set of headers that need to be registered as inline header. This configuration - // allows users to customize the inline headers on-demand at Envoy startup without modifying - // Envoy's source code. - // - // Note that the 'set-cookie' header cannot be registered as inline header. - repeated CustomInlineHeader inline_headers = 32; -} - -// Administration interface :ref:`operations documentation -// `. 
-// [#next-free-field: 6] -message Admin { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Admin"; - - reserved 1; - - reserved "access_log_path"; - - // Configuration for :ref:`access logs ` - // emitted by the administration server. - repeated accesslog.v4alpha.AccessLog access_log = 5; - - // The cpu profiler output path for the administration server. If no profile - // path is specified, the default is ‘/var/log/envoy/envoy.prof’. - string profile_path = 2; - - // The TCP address that the administration server will listen on. - // If not specified, Envoy will not start an administration server. - core.v4alpha.Address address = 3; - - // Additional socket options that may not be present in Envoy source code or - // precompiled binaries. - repeated core.v4alpha.SocketOption socket_options = 4; -} - -// Cluster manager :ref:`architecture overview `. -message ClusterManager { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v3.ClusterManager"; - - message OutlierDetection { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v3.ClusterManager.OutlierDetection"; - - // Specifies the path to the outlier event log. - string event_log_path = 1; - - // [#not-implemented-hide:] - // The gRPC service for the outlier detection event service. - // If empty, outlier detection events won't be sent to a remote endpoint. - core.v4alpha.EventServiceConfig event_service = 2; - } - - // Name of the local cluster (i.e., the cluster that owns the Envoy running - // this configuration). In order to enable :ref:`zone aware routing - // ` this option must be set. - // If *local_cluster_name* is defined then :ref:`clusters - // ` must be defined in the :ref:`Bootstrap - // static cluster resources - // `. This is unrelated to - // the :option:`--service-cluster` option which does not `affect zone aware - // routing `_. 
- string local_cluster_name = 1; - - // Optional global configuration for outlier detection. - OutlierDetection outlier_detection = 2; - - // Optional configuration used to bind newly established upstream connections. - // This may be overridden on a per-cluster basis by upstream_bind_config in the cds_config. - core.v4alpha.BindConfig upstream_bind_config = 3; - - // A management server endpoint to stream load stats to via - // *StreamLoadStats*. This must have :ref:`api_type - // ` :ref:`GRPC - // `. - core.v4alpha.ApiConfigSource load_stats_config = 4; -} - -// Allows you to specify different watchdog configs for different subsystems. -// This allows finer tuned policies for the watchdog. If a subsystem is omitted -// the default values for that system will be used. -message Watchdogs { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v3.Watchdogs"; - - // Watchdog for the main thread. - Watchdog main_thread_watchdog = 1; - - // Watchdog for the worker threads. - Watchdog worker_watchdog = 2; -} - -// Envoy process watchdog configuration. When configured, this monitors for -// nonresponsive threads and kills the process after the configured thresholds. -// See the :ref:`watchdog documentation ` for more information. -// [#next-free-field: 8] -message Watchdog { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Watchdog"; - - message WatchdogAction { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v3.Watchdog.WatchdogAction"; - - // The events are fired in this order: KILL, MULTIKILL, MEGAMISS, MISS. - // Within an event type, actions execute in the order they are configured. - // For KILL/MULTIKILL there is a default PANIC that will run after the - // registered actions and kills the process if it wasn't already killed. - // It might be useful to specify several debug actions, and possibly an - // alternate FATAL action. 
- enum WatchdogEvent { - UNKNOWN = 0; - KILL = 1; - MULTIKILL = 2; - MEGAMISS = 3; - MISS = 4; - } - - // Extension specific configuration for the action. - core.v4alpha.TypedExtensionConfig config = 1; - - WatchdogEvent event = 2 [(validate.rules).enum = {defined_only: true}]; - } - - // Register actions that will fire on given WatchDog events. - // See *WatchDogAction* for priority of events. - repeated WatchdogAction actions = 7; - - // The duration after which Envoy counts a nonresponsive thread in the - // *watchdog_miss* statistic. If not specified the default is 200ms. - google.protobuf.Duration miss_timeout = 1; - - // The duration after which Envoy counts a nonresponsive thread in the - // *watchdog_mega_miss* statistic. If not specified the default is - // 1000ms. - google.protobuf.Duration megamiss_timeout = 2; - - // If a watched thread has been nonresponsive for this duration, assume a - // programming error and kill the entire Envoy process. Set to 0 to disable - // kill behavior. If not specified the default is 0 (disabled). - google.protobuf.Duration kill_timeout = 3; - - // Defines the maximum jitter used to adjust the *kill_timeout* if *kill_timeout* is - // enabled. Enabling this feature would help to reduce risk of synchronized - // watchdog kill events across proxies due to external triggers. Set to 0 to - // disable. If not specified the default is 0 (disabled). - google.protobuf.Duration max_kill_timeout_jitter = 6 [(validate.rules).duration = {gte {}}]; - - // If max(2, ceil(registered_threads * Fraction(*multikill_threshold*))) - // threads have been nonresponsive for at least this duration kill the entire - // Envoy process. Set to 0 to disable this behavior. If not specified the - // default is 0 (disabled). - google.protobuf.Duration multikill_timeout = 4; - - // Sets the threshold for *multikill_timeout* in terms of the percentage of - // nonresponsive threads required for the *multikill_timeout*. - // If not specified the default is 0. 
- type.v3.Percent multikill_threshold = 5; -} - -// Fatal actions to run while crashing. Actions can be safe (meaning they are -// async-signal safe) or unsafe. We run all safe actions before we run unsafe actions. -// If using an unsafe action that could get stuck or deadlock, it important to -// have an out of band system to terminate the process. -// -// The interface for the extension is ``Envoy::Server::Configuration::FatalAction``. -// *FatalAction* extensions live in the ``envoy.extensions.fatal_actions`` API -// namespace. -message FatalAction { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v3.FatalAction"; - - // Extension specific configuration for the action. It's expected to conform - // to the ``Envoy::Server::Configuration::FatalAction`` interface. - core.v4alpha.TypedExtensionConfig config = 1; -} - -// Runtime :ref:`configuration overview ` (deprecated). -message Runtime { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Runtime"; - - // The implementation assumes that the file system tree is accessed via a - // symbolic link. An atomic link swap is used when a new tree should be - // switched to. This parameter specifies the path to the symbolic link. Envoy - // will watch the location for changes and reload the file system tree when - // they happen. If this parameter is not set, there will be no disk based - // runtime. - string symlink_root = 1; - - // Specifies the subdirectory to load within the root directory. This is - // useful if multiple systems share the same delivery mechanism. Envoy - // configuration elements can be contained in a dedicated subdirectory. - string subdirectory = 2; - - // Specifies an optional subdirectory to load within the root directory. If - // specified and the directory exists, configuration values within this - // directory will override those found in the primary subdirectory. 
This is - // useful when Envoy is deployed across many different types of servers. - // Sometimes it is useful to have a per service cluster directory for runtime - // configuration. See below for exactly how the override directory is used. - string override_subdirectory = 3; - - // Static base runtime. This will be :ref:`overridden - // ` by other runtime layers, e.g. - // disk or admin. This follows the :ref:`runtime protobuf JSON representation - // encoding `. - google.protobuf.Struct base = 4; -} - -// [#next-free-field: 6] -message RuntimeLayer { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v3.RuntimeLayer"; - - // :ref:`Disk runtime ` layer. - message DiskLayer { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v3.RuntimeLayer.DiskLayer"; - - // The implementation assumes that the file system tree is accessed via a - // symbolic link. An atomic link swap is used when a new tree should be - // switched to. This parameter specifies the path to the symbolic link. - // Envoy will watch the location for changes and reload the file system tree - // when they happen. See documentation on runtime :ref:`atomicity - // ` for further details on how reloads are - // treated. - string symlink_root = 1; - - // Specifies the subdirectory to load within the root directory. This is - // useful if multiple systems share the same delivery mechanism. Envoy - // configuration elements can be contained in a dedicated subdirectory. - string subdirectory = 3; - - // :ref:`Append ` the - // service cluster to the path under symlink root. - bool append_service_cluster = 2; - } - - // :ref:`Admin console runtime ` layer. - message AdminLayer { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v3.RuntimeLayer.AdminLayer"; - } - - // :ref:`Runtime Discovery Service (RTDS) ` layer. 
- message RtdsLayer { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v3.RuntimeLayer.RtdsLayer"; - - // Resource to subscribe to at *rtds_config* for the RTDS layer. - string name = 1; - - // RTDS configuration source. - core.v4alpha.ConfigSource rtds_config = 2; - } - - // Descriptive name for the runtime layer. This is only used for the runtime - // :http:get:`/runtime` output. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - oneof layer_specifier { - option (validate.required) = true; - - // :ref:`Static runtime ` layer. - // This follows the :ref:`runtime protobuf JSON representation encoding - // `. Unlike static xDS resources, this static - // layer is overridable by later layers in the runtime virtual filesystem. - google.protobuf.Struct static_layer = 2; - - DiskLayer disk_layer = 3; - - AdminLayer admin_layer = 4; - - RtdsLayer rtds_layer = 5; - } -} - -// Runtime :ref:`configuration overview `. -message LayeredRuntime { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v3.LayeredRuntime"; - - // The :ref:`layers ` of the runtime. This is ordered - // such that later layers in the list overlay earlier entries. - repeated RuntimeLayer layers = 1; -} - -// Used to specify the header that needs to be registered as an inline header. -// -// If request or response contain multiple headers with the same name and the header -// name is registered as an inline header. Then multiple headers will be folded -// into one, and multiple header values will be concatenated by a suitable delimiter. -// The delimiter is generally a comma. -// -// For example, if 'foo' is registered as an inline header, and the headers contains -// the following two headers: -// -// .. code-block:: text -// -// foo: bar -// foo: eep -// -// Then they will eventually be folded into: -// -// .. 
code-block:: text -// -// foo: bar, eep -// -// Inline headers provide O(1) search performance, but each inline header imposes -// an additional memory overhead on all instances of the corresponding type of -// HeaderMap or TrailerMap. -message CustomInlineHeader { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v3.CustomInlineHeader"; - - enum InlineHeaderType { - REQUEST_HEADER = 0; - REQUEST_TRAILER = 1; - RESPONSE_HEADER = 2; - RESPONSE_TRAILER = 3; - } - - // The name of the header that is expected to be set as the inline header. - string inline_header_name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // The type of the header that is expected to be set as the inline header. - InlineHeaderType inline_header_type = 2 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/api/envoy/config/cluster/v3/cluster.proto b/api/envoy/config/cluster/v3/cluster.proto index 35bfa93ea352a..d6213d6fe9488 100644 --- a/api/envoy/config/cluster/v3/cluster.proto +++ b/api/envoy/config/cluster/v3/cluster.proto @@ -110,7 +110,7 @@ message Cluster { // this option or not. CLUSTER_PROVIDED = 6; - // [#not-implemented-hide:] Use the new :ref:`load_balancing_policy + // Use the new :ref:`load_balancing_policy // ` field to determine the LB policy. // [#next-major-version: In the v3 API, we should consider deprecating the lb_policy field // and instead using the new load_balancing_policy field as the one and only mechanism for @@ -720,8 +720,7 @@ message Cluster { // The :ref:`load balancer type ` to use // when picking a host in the cluster. - // [#comment:TODO: Remove enum constraint :ref:`LOAD_BALANCING_POLICY_CONFIG` when implemented.] 
- LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true not_in: 7}]; + LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true}]; // Setting this is required for specifying members of // :ref:`STATIC`, @@ -782,7 +781,7 @@ message Cluster { [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; // Additional options when handling HTTP1 requests. - // This has been deprecated in favor of http_protocol_options fields in the in the + // This has been deprecated in favor of http_protocol_options fields in the // :ref:`http_protocol_options ` message. // http_protocol_options can be set via the cluster's // :ref:`extension_protocol_options`. @@ -798,7 +797,7 @@ message Cluster { // supports prior knowledge for upstream connections. Even if TLS is used // with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2 // connections to happen over plain text. - // This has been deprecated in favor of http2_protocol_options fields in the in the + // This has been deprecated in favor of http2_protocol_options fields in the // :ref:`http_protocol_options ` // message. http2_protocol_options can be set via the cluster's // :ref:`extension_protocol_options`. @@ -1008,7 +1007,7 @@ message Cluster { // servers of this cluster. repeated Filter filters = 40; - // [#not-implemented-hide:] New mechanism for LB policy configuration. Used only if the + // New mechanism for LB policy configuration. Used only if the // :ref:`lb_policy` field has the value // :ref:`LOAD_BALANCING_POLICY_CONFIG`. LoadBalancingPolicy load_balancing_policy = 41; @@ -1073,7 +1072,7 @@ message Cluster { bool connection_pool_per_downstream_connection = 51; } -// [#not-implemented-hide:] Extensible load balancing policy configuration. +// Extensible load balancing policy configuration. // // Every LB policy defined via this mechanism will be identified via a unique name using reverse // DNS notation. 
If the policy needs configuration parameters, it must define a message for its @@ -1099,14 +1098,11 @@ message LoadBalancingPolicy { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.LoadBalancingPolicy.Policy"; - reserved 2; - - reserved "config"; + reserved 2, 1, 3; - // Required. The name of the LB policy. - string name = 1; + reserved "config", "name", "typed_config"; - google.protobuf.Any typed_config = 3; + core.v3.TypedExtensionConfig typed_extension_config = 4; } // Each client will iterate over the list in order and stop at the first policy that it diff --git a/api/envoy/config/cluster/v4alpha/BUILD b/api/envoy/config/cluster/v4alpha/BUILD deleted file mode 100644 index 02eb1b1917251..0000000000000 --- a/api/envoy/config/cluster/v4alpha/BUILD +++ /dev/null @@ -1,16 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/cluster/v3:pkg", - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/endpoint/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@com_github_cncf_udpa//xds/core/v3:pkg", - ], -) diff --git a/api/envoy/config/cluster/v4alpha/circuit_breaker.proto b/api/envoy/config/cluster/v4alpha/circuit_breaker.proto deleted file mode 100644 index 36aebb8977800..0000000000000 --- a/api/envoy/config/cluster/v4alpha/circuit_breaker.proto +++ /dev/null @@ -1,105 +0,0 @@ -syntax = "proto3"; - -package envoy.config.cluster.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.cluster.v4alpha"; -option java_outer_classname = "CircuitBreakerProto"; -option 
java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Circuit breakers] - -// :ref:`Circuit breaking` settings can be -// specified individually for each defined priority. -message CircuitBreakers { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.CircuitBreakers"; - - // A Thresholds defines CircuitBreaker settings for a - // :ref:`RoutingPriority`. - // [#next-free-field: 9] - message Thresholds { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.CircuitBreakers.Thresholds"; - - message RetryBudget { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.CircuitBreakers.Thresholds.RetryBudget"; - - // Specifies the limit on concurrent retries as a percentage of the sum of active requests and - // active pending requests. For example, if there are 100 active requests and the - // budget_percent is set to 25, there may be 25 active retries. - // - // This parameter is optional. Defaults to 20%. - type.v3.Percent budget_percent = 1; - - // Specifies the minimum retry concurrency allowed for the retry budget. The limit on the - // number of active retries may never go below this number. - // - // This parameter is optional. Defaults to 3. - google.protobuf.UInt32Value min_retry_concurrency = 2; - } - - // The :ref:`RoutingPriority` - // the specified CircuitBreaker settings apply to. - core.v4alpha.RoutingPriority priority = 1 [(validate.rules).enum = {defined_only: true}]; - - // The maximum number of connections that Envoy will make to the upstream - // cluster. If not specified, the default is 1024. - google.protobuf.UInt32Value max_connections = 2; - - // The maximum number of pending requests that Envoy will allow to the - // upstream cluster. If not specified, the default is 1024. 
- google.protobuf.UInt32Value max_pending_requests = 3; - - // The maximum number of parallel requests that Envoy will make to the - // upstream cluster. If not specified, the default is 1024. - google.protobuf.UInt32Value max_requests = 4; - - // The maximum number of parallel retries that Envoy will allow to the - // upstream cluster. If not specified, the default is 3. - google.protobuf.UInt32Value max_retries = 5; - - // Specifies a limit on concurrent retries in relation to the number of active requests. This - // parameter is optional. - // - // .. note:: - // - // If this field is set, the retry budget will override any configured retry circuit - // breaker. - RetryBudget retry_budget = 8; - - // If track_remaining is true, then stats will be published that expose - // the number of resources remaining until the circuit breakers open. If - // not specified, the default is false. - // - // .. note:: - // - // If a retry budget is used in lieu of the max_retries circuit breaker, - // the remaining retry resources remaining will not be tracked. - bool track_remaining = 6; - - // The maximum number of connection pools per cluster that Envoy will concurrently support at - // once. If not specified, the default is unlimited. Set this for clusters which create a - // large number of connection pools. See - // :ref:`Circuit Breaking ` for - // more details. - google.protobuf.UInt32Value max_connection_pools = 7; - } - - // If multiple :ref:`Thresholds` - // are defined with the same :ref:`RoutingPriority`, - // the first one in the list is used. If no Thresholds is defined for a given - // :ref:`RoutingPriority`, the default values - // are used. 
- repeated Thresholds thresholds = 1; -} diff --git a/api/envoy/config/cluster/v4alpha/cluster.proto b/api/envoy/config/cluster/v4alpha/cluster.proto deleted file mode 100644 index ebd007bca5780..0000000000000 --- a/api/envoy/config/cluster/v4alpha/cluster.proto +++ /dev/null @@ -1,1047 +0,0 @@ -syntax = "proto3"; - -package envoy.config.cluster.v4alpha; - -import "envoy/config/cluster/v4alpha/circuit_breaker.proto"; -import "envoy/config/cluster/v4alpha/filter.proto"; -import "envoy/config/cluster/v4alpha/outlier_detection.proto"; -import "envoy/config/core/v4alpha/address.proto"; -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/config_source.proto"; -import "envoy/config/core/v4alpha/extension.proto"; -import "envoy/config/core/v4alpha/health_check.proto"; -import "envoy/config/core/v4alpha/resolver.proto"; -import "envoy/config/endpoint/v3/endpoint.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "xds/core/v3/collection_entry.proto"; - -import "udpa/annotations/security.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.cluster.v4alpha"; -option java_outer_classname = "ClusterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Cluster configuration] - -// Cluster list collections. Entries are *Cluster* resources or references. -// [#not-implemented-hide:] -message ClusterCollection { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.ClusterCollection"; - - xds.core.v3.CollectionEntry entries = 1; -} - -// Configuration for a single upstream cluster. 
-// [#next-free-field: 56] -message Cluster { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.cluster.v3.Cluster"; - - // Refer to :ref:`service discovery type ` - // for an explanation on each type. - enum DiscoveryType { - // Refer to the :ref:`static discovery type` - // for an explanation. - STATIC = 0; - - // Refer to the :ref:`strict DNS discovery - // type` - // for an explanation. - STRICT_DNS = 1; - - // Refer to the :ref:`logical DNS discovery - // type` - // for an explanation. - LOGICAL_DNS = 2; - - // Refer to the :ref:`service discovery type` - // for an explanation. - EDS = 3; - - // Refer to the :ref:`original destination discovery - // type` - // for an explanation. - ORIGINAL_DST = 4; - } - - // Refer to :ref:`load balancer type ` architecture - // overview section for information on each type. - enum LbPolicy { - reserved 4; - - reserved "ORIGINAL_DST_LB"; - - // Refer to the :ref:`round robin load balancing - // policy` - // for an explanation. - ROUND_ROBIN = 0; - - // Refer to the :ref:`least request load balancing - // policy` - // for an explanation. - LEAST_REQUEST = 1; - - // Refer to the :ref:`ring hash load balancing - // policy` - // for an explanation. - RING_HASH = 2; - - // Refer to the :ref:`random load balancing - // policy` - // for an explanation. - RANDOM = 3; - - // Refer to the :ref:`Maglev load balancing policy` - // for an explanation. - MAGLEV = 5; - - // This load balancer type must be specified if the configured cluster provides a cluster - // specific load balancer. Consult the configured cluster's documentation for whether to set - // this option or not. - CLUSTER_PROVIDED = 6; - - // [#not-implemented-hide:] Use the new :ref:`load_balancing_policy - // ` field to determine the LB policy. 
- // [#next-major-version: In the v3 API, we should consider deprecating the lb_policy field - // and instead using the new load_balancing_policy field as the one and only mechanism for - // configuring this.] - LOAD_BALANCING_POLICY_CONFIG = 7; - } - - // When V4_ONLY is selected, the DNS resolver will only perform a lookup for - // addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will - // only perform a lookup for addresses in the IPv6 family. If AUTO is - // specified, the DNS resolver will first perform a lookup for addresses in - // the IPv6 family and fallback to a lookup for addresses in the IPv4 family. - // For cluster types other than - // :ref:`STRICT_DNS` and - // :ref:`LOGICAL_DNS`, - // this setting is - // ignored. - enum DnsLookupFamily { - AUTO = 0; - V4_ONLY = 1; - V6_ONLY = 2; - } - - enum ClusterProtocolSelection { - // Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2). - // If :ref:`http2_protocol_options ` are - // present, HTTP2 will be used, otherwise HTTP1.1 will be used. - USE_CONFIGURED_PROTOCOL = 0; - - // Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection. - USE_DOWNSTREAM_PROTOCOL = 1; - } - - // TransportSocketMatch specifies what transport socket config will be used - // when the match conditions are satisfied. - message TransportSocketMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.Cluster.TransportSocketMatch"; - - // The name of the match, used in stats generation. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Optional endpoint metadata match criteria. - // The connection to the endpoint with metadata matching what is set in this field - // will use the transport socket configuration specified here. - // The endpoint's metadata entry in *envoy.transport_socket_match* is used to match - // against the values specified in this field. 
- google.protobuf.Struct match = 2; - - // The configuration of the transport socket. - // [#extension-category: envoy.transport_sockets.upstream] - core.v4alpha.TransportSocket transport_socket = 3; - } - - // Extended cluster type. - message CustomClusterType { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.Cluster.CustomClusterType"; - - // The type of the cluster to instantiate. The name must match a supported cluster type. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Cluster specific configuration which depends on the cluster being instantiated. - // See the supported cluster for further documentation. - // [#extension-category: envoy.clusters] - google.protobuf.Any typed_config = 2; - } - - // Only valid when discovery type is EDS. - message EdsClusterConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.Cluster.EdsClusterConfig"; - - // Configuration for the source of EDS updates for this Cluster. - core.v4alpha.ConfigSource eds_config = 1; - - // Optional alternative to cluster name to present to EDS. This does not - // have the same restrictions as cluster name, i.e. it may be arbitrary - // length. This may be a xdstp:// URL. - string service_name = 2; - } - - // Optionally divide the endpoints in this cluster into subsets defined by - // endpoint metadata and selected by route and weighted cluster metadata. - // [#next-free-field: 8] - message LbSubsetConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.Cluster.LbSubsetConfig"; - - // If NO_FALLBACK is selected, a result - // equivalent to no healthy hosts is reported. If ANY_ENDPOINT is selected, - // any cluster endpoint may be returned (subject to policy, health checks, - // etc). If DEFAULT_SUBSET is selected, load balancing is performed over the - // endpoints matching the values from the default_subset field. 
- enum LbSubsetFallbackPolicy { - NO_FALLBACK = 0; - ANY_ENDPOINT = 1; - DEFAULT_SUBSET = 2; - } - - // Specifications for subsets. - message LbSubsetSelector { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector"; - - // Allows to override top level fallback policy per selector. - enum LbSubsetSelectorFallbackPolicy { - // If NOT_DEFINED top level config fallback policy is used instead. - NOT_DEFINED = 0; - - // If NO_FALLBACK is selected, a result equivalent to no healthy hosts is reported. - NO_FALLBACK = 1; - - // If ANY_ENDPOINT is selected, any cluster endpoint may be returned - // (subject to policy, health checks, etc). - ANY_ENDPOINT = 2; - - // If DEFAULT_SUBSET is selected, load balancing is performed over the - // endpoints matching the values from the default_subset field. - DEFAULT_SUBSET = 3; - - // If KEYS_SUBSET is selected, subset selector matching is performed again with metadata - // keys reduced to - // :ref:`fallback_keys_subset`. - // It allows for a fallback to a different, less specific selector if some of the keys of - // the selector are considered optional. - KEYS_SUBSET = 4; - } - - // List of keys to match with the weighted cluster metadata. - repeated string keys = 1; - - // Selects a mode of operation in which each subset has only one host. This mode uses the same rules for - // choosing a host, but updating hosts is faster, especially for large numbers of hosts. - // - // If a match is found to a host, that host will be used regardless of priority levels, unless the host is unhealthy. - // - // Currently, this mode is only supported if `subset_selectors` has only one entry, and `keys` contains - // only one entry. - // - // When this mode is enabled, configurations that contain more than one host with the same metadata value for the single key in `keys` - // will use only one of the hosts with the given key; no requests will be routed to the others. 
The cluster gauge - // :ref:`lb_subsets_single_host_per_subset_duplicate` indicates how many duplicates are - // present in the current configuration. - bool single_host_per_subset = 4; - - // The behavior used when no endpoint subset matches the selected route's - // metadata. - LbSubsetSelectorFallbackPolicy fallback_policy = 2 - [(validate.rules).enum = {defined_only: true}]; - - // Subset of - // :ref:`keys` used by - // :ref:`KEYS_SUBSET` - // fallback policy. - // It has to be a non empty list if KEYS_SUBSET fallback policy is selected. - // For any other fallback policy the parameter is not used and should not be set. - // Only values also present in - // :ref:`keys` are allowed, but - // `fallback_keys_subset` cannot be equal to `keys`. - repeated string fallback_keys_subset = 3; - } - - // The behavior used when no endpoint subset matches the selected route's - // metadata. The value defaults to - // :ref:`NO_FALLBACK`. - LbSubsetFallbackPolicy fallback_policy = 1 [(validate.rules).enum = {defined_only: true}]; - - // Specifies the default subset of endpoints used during fallback if - // fallback_policy is - // :ref:`DEFAULT_SUBSET`. - // Each field in default_subset is - // compared to the matching LbEndpoint.Metadata under the *envoy.lb* - // namespace. It is valid for no hosts to match, in which case the behavior - // is the same as a fallback_policy of - // :ref:`NO_FALLBACK`. - google.protobuf.Struct default_subset = 2; - - // For each entry, LbEndpoint.Metadata's - // *envoy.lb* namespace is traversed and a subset is created for each unique - // combination of key and value. For example: - // - // .. code-block:: json - // - // { "subset_selectors": [ - // { "keys": [ "version" ] }, - // { "keys": [ "stage", "hardware_type" ] } - // ]} - // - // A subset is matched when the metadata from the selected route and - // weighted cluster contains the same keys and values as the subset's - // metadata. The same host may appear in multiple subsets. 
- repeated LbSubsetSelector subset_selectors = 3; - - // If true, routing to subsets will take into account the localities and locality weights of the - // endpoints when making the routing decision. - // - // There are some potential pitfalls associated with enabling this feature, as the resulting - // traffic split after applying both a subset match and locality weights might be undesirable. - // - // Consider for example a situation in which you have 50/50 split across two localities X/Y - // which have 100 hosts each without subsetting. If the subset LB results in X having only 1 - // host selected but Y having 100, then a lot more load is being dumped on the single host in X - // than originally anticipated in the load balancing assignment delivered via EDS. - bool locality_weight_aware = 4; - - // When used with locality_weight_aware, scales the weight of each locality by the ratio - // of hosts in the subset vs hosts in the original subset. This aims to even out the load - // going to an individual locality if said locality is disproportionately affected by the - // subset predicate. - bool scale_locality_weight = 5; - - // If true, when a fallback policy is configured and its corresponding subset fails to find - // a host this will cause any host to be selected instead. - // - // This is useful when using the default subset as the fallback policy, given the default - // subset might become empty. With this option enabled, if that happens the LB will attempt - // to select a host from the entire cluster. - bool panic_mode_any = 6; - - // If true, metadata specified for a metadata key will be matched against the corresponding - // endpoint metadata if the endpoint metadata matches the value exactly OR it is a list value - // and any of the elements in the list matches the criteria. - bool list_as_any = 7; - } - - // Specific configuration for the LeastRequest load balancing policy. 
- message LeastRequestLbConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.Cluster.LeastRequestLbConfig"; - - // The number of random healthy hosts from which the host with the fewest active requests will - // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. - google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32 = {gte: 2}]; - - // The following formula is used to calculate the dynamic weights when hosts have different load - // balancing weights: - // - // `weight = load_balancing_weight / (active_requests + 1)^active_request_bias` - // - // The larger the active request bias is, the more aggressively active requests will lower the - // effective weight when all host weights are not equal. - // - // `active_request_bias` must be greater than or equal to 0.0. - // - // When `active_request_bias == 0.0` the Least Request Load Balancer doesn't consider the number - // of active requests at the time it picks a host and behaves like the Round Robin Load - // Balancer. - // - // When `active_request_bias > 0.0` the Least Request Load Balancer scales the load balancing - // weight by the number of active requests at the time it does a pick. - // - // The value is cached for performance reasons and refreshed whenever one of the Load Balancer's - // host sets changes, e.g., whenever there is a host membership update or a host load balancing - // weight change. - // - // .. note:: - // This setting only takes effect if all host weights are not equal. - core.v4alpha.RuntimeDouble active_request_bias = 2; - } - - // Specific configuration for the :ref:`RingHash` - // load balancing policy. - message RingHashLbConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.Cluster.RingHashLbConfig"; - - // The hash function used to hash hosts onto the ketama ring. 
- enum HashFunction { - // Use `xxHash `_, this is the default hash function. - XX_HASH = 0; - - // Use `MurmurHash2 `_, this is compatible with - // std:hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled - // on Linux and not macOS. - MURMUR_HASH_2 = 1; - } - - reserved 2; - - // Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each - // provided host) the better the request distribution will reflect the desired weights. Defaults - // to 1024 entries, and limited to 8M entries. See also - // :ref:`maximum_ring_size`. - google.protobuf.UInt64Value minimum_ring_size = 1 [(validate.rules).uint64 = {lte: 8388608}]; - - // The hash function used to hash hosts onto the ketama ring. The value defaults to - // :ref:`XX_HASH`. - HashFunction hash_function = 3 [(validate.rules).enum = {defined_only: true}]; - - // Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered - // to further constrain resource use. See also - // :ref:`minimum_ring_size`. - google.protobuf.UInt64Value maximum_ring_size = 4 [(validate.rules).uint64 = {lte: 8388608}]; - } - - // Specific configuration for the :ref:`Maglev` - // load balancing policy. - message MaglevLbConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.Cluster.MaglevLbConfig"; - - // The table size for Maglev hashing. The Maglev aims for ‘minimal disruption’ rather than an absolute guarantee. - // Minimal disruption means that when the set of upstreams changes, a connection will likely be sent to the same - // upstream as it was before. Increasing the table size reduces the amount of disruption. - // The table size must be prime number limited to 5000011. If it is not specified, the default is 65537. 
- google.protobuf.UInt64Value table_size = 1 [(validate.rules).uint64 = {lte: 5000011}]; - } - - // Specific configuration for the - // :ref:`Original Destination ` - // load balancing policy. - message OriginalDstLbConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.Cluster.OriginalDstLbConfig"; - - // When true, :ref:`x-envoy-original-dst-host - // ` can be used to override destination - // address. - // - // .. attention:: - // - // This header isn't sanitized by default, so enabling this feature allows HTTP clients to - // route traffic to arbitrary hosts and/or ports, which may have serious security - // consequences. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. - bool use_http_header = 1; - } - - // Common configuration for all load balancer implementations. - // [#next-free-field: 8] - message CommonLbConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.Cluster.CommonLbConfig"; - - // Configuration for :ref:`zone aware routing - // `. - message ZoneAwareLbConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig"; - - // Configures percentage of requests that will be considered for zone aware routing - // if zone aware routing is configured. If not specified, the default is 100%. - // * :ref:`runtime values `. - // * :ref:`Zone aware routing support `. - type.v3.Percent routing_enabled = 1; - - // Configures minimum upstream cluster size required for zone aware routing - // If upstream cluster size is less than specified, zone aware routing is not performed - // even if zone aware routing is configured. If not specified, the default is 6. - // * :ref:`runtime values `. - // * :ref:`Zone aware routing support `. 
- google.protobuf.UInt64Value min_cluster_size = 2; - - // If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic - // mode`. Instead, the cluster will fail all - // requests as if all hosts are unhealthy. This can help avoid potentially overwhelming a - // failing service. - bool fail_traffic_on_panic = 3; - } - - // Configuration for :ref:`locality weighted load balancing - // ` - message LocalityWeightedLbConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.Cluster.CommonLbConfig.LocalityWeightedLbConfig"; - } - - // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) - message ConsistentHashingLbConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.Cluster.CommonLbConfig.ConsistentHashingLbConfig"; - - // If set to `true`, the cluster will use hostname instead of the resolved - // address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address. - bool use_hostname_for_hashing = 1; - - // Configures percentage of average cluster load to bound per upstream host. For example, with a value of 150 - // no upstream host will get a load more than 1.5 times the average load of all the hosts in the cluster. - // If not specified, the load is not bounded for any upstream host. Typical value for this parameter is between 120 and 200. - // Minimum is 100. - // - // Applies to both Ring Hash and Maglev load balancers. - // - // This is implemented based on the method described in the paper https://arxiv.org/abs/1608.01350. For the specified - // `hash_balance_factor`, requests to any upstream host are capped at `hash_balance_factor/100` times the average number of requests - // across the cluster. 
When a request arrives for an upstream host that is currently serving at its max capacity, linear probing - // is used to identify an eligible host. Further, the linear probe is implemented using a random jump in hosts ring/table to identify - // the eligible host (this technique is as described in the paper https://arxiv.org/abs/1908.08762 - the random jump avoids the - // cascading overflow effect when choosing the next host in the ring/table). - // - // If weights are specified on the hosts, they are respected. - // - // This is an O(N) algorithm, unlike other load balancers. Using a lower `hash_balance_factor` results in more hosts - // being probed, so use a higher value if you require better performance. - google.protobuf.UInt32Value hash_balance_factor = 2 [(validate.rules).uint32 = {gte: 100}]; - } - - // Configures the :ref:`healthy panic threshold `. - // If not specified, the default is 50%. - // To disable panic mode, set to 0%. - // - // .. note:: - // The specified percent will be truncated to the nearest 1%. - type.v3.Percent healthy_panic_threshold = 1; - - oneof locality_config_specifier { - ZoneAwareLbConfig zone_aware_lb_config = 2; - - LocalityWeightedLbConfig locality_weighted_lb_config = 3; - } - - // If set, all health check/weight/metadata updates that happen within this duration will be - // merged and delivered in one shot when the duration expires. The start of the duration is when - // the first update happens. This is useful for big clusters, with potentially noisy deploys - // that might trigger excessive CPU usage due to a constant stream of healthcheck state changes - // or metadata updates. The first set of updates to be seen apply immediately (e.g.: a new - // cluster). Please always keep in mind that the use of sandbox technologies may change this - // behavior. - // - // If this is not set, we default to a merge window of 1000ms. To disable it, set the merge - // window to 0. 
- // - // Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is - // because merging those updates isn't currently safe. See - // https://github.com/envoyproxy/envoy/pull/3941. - google.protobuf.Duration update_merge_window = 4; - - // If set to true, Envoy will :ref:`exclude ` new hosts - // when computing load balancing weights until they have been health checked for the first time. - // This will have no effect unless active health checking is also configured. - bool ignore_new_hosts_until_first_hc = 5; - - // If set to `true`, the cluster manager will drain all existing - // connections to upstream hosts whenever hosts are added or removed from the cluster. - bool close_connections_on_host_set_change = 6; - - // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) - ConsistentHashingLbConfig consistent_hashing_lb_config = 7; - } - - message RefreshRate { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.Cluster.RefreshRate"; - - // Specifies the base interval between refreshes. This parameter is required and must be greater - // than zero and less than - // :ref:`max_interval `. - google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { - required: true - gt {nanos: 1000000} - }]; - - // Specifies the maximum interval between refreshes. This parameter is optional, but must be - // greater than or equal to the - // :ref:`base_interval ` if set. The default - // is 10 times the :ref:`base_interval `. - google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {nanos: 1000000}}]; - } - - message PreconnectPolicy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.Cluster.PreconnectPolicy"; - - // Indicates how many streams (rounded up) can be anticipated per-upstream for each - // incoming stream. This is useful for high-QPS or latency-sensitive services. 
Preconnecting - // will only be done if the upstream is healthy and the cluster has traffic. - // - // For example if this is 2, for an incoming HTTP/1.1 stream, 2 connections will be - // established, one for the new incoming stream, and one for a presumed follow-up stream. For - // HTTP/2, only one connection would be established by default as one connection can - // serve both the original and presumed follow-up stream. - // - // In steady state for non-multiplexed connections a value of 1.5 would mean if there were 100 - // active streams, there would be 100 connections in use, and 50 connections preconnected. - // This might be a useful value for something like short lived single-use connections, - // for example proxying HTTP/1.1 if keep-alive were false and each stream resulted in connection - // termination. It would likely be overkill for long lived connections, such as TCP proxying SMTP - // or regular HTTP/1.1 with keep-alive. For long lived traffic, a value of 1.05 would be more - // reasonable, where for every 100 connections, 5 preconnected connections would be in the queue - // in case of unexpected disconnects where the connection could not be reused. - // - // If this value is not set, or set explicitly to one, Envoy will fetch as many connections - // as needed to serve streams in flight. This means in steady state if a connection is torn down, - // a subsequent streams will pay an upstream-rtt latency penalty waiting for a new connection. - // - // This is limited somewhat arbitrarily to 3 because preconnecting too aggressively can - // harm latency more than the preconnecting helps. - google.protobuf.DoubleValue per_upstream_preconnect_ratio = 1 - [(validate.rules).double = {lte: 3.0 gte: 1.0}]; - - // Indicates how many many streams (rounded up) can be anticipated across a cluster for each - // stream, useful for low QPS services. 
This is currently supported for a subset of - // deterministic non-hash-based load-balancing algorithms (weighted round robin, random). - // Unlike *per_upstream_preconnect_ratio* this preconnects across the upstream instances in a - // cluster, doing best effort predictions of what upstream would be picked next and - // pre-establishing a connection. - // - // Preconnecting will be limited to one preconnect per configured upstream in the cluster and will - // only be done if there are healthy upstreams and the cluster has traffic. - // - // For example if preconnecting is set to 2 for a round robin HTTP/2 cluster, on the first - // incoming stream, 2 connections will be preconnected - one to the first upstream for this - // cluster, one to the second on the assumption there will be a follow-up stream. - // - // If this value is not set, or set explicitly to one, Envoy will fetch as many connections - // as needed to serve streams in flight, so during warm up and in steady state if a connection - // is closed (and per_upstream_preconnect_ratio is not set), there will be a latency hit for - // connection establishment. - // - // If both this and preconnect_ratio are set, Envoy will make sure both predicted needs are met, - // basically preconnecting max(predictive-preconnect, per-upstream-preconnect), for each - // upstream. - google.protobuf.DoubleValue predictive_preconnect_ratio = 2 - [(validate.rules).double = {lte: 3.0 gte: 1.0}]; - } - - reserved 12, 15, 7, 11, 35, 9, 46, 29, 13, 14, 18, 45, 26, 47; - - reserved "hosts", "tls_context", "extension_protocol_options", "max_requests_per_connection", - "upstream_http_protocol_options", "common_http_protocol_options", "http_protocol_options", - "http2_protocol_options", "dns_resolvers", "use_tcp_for_dns_lookups", "protocol_selection", - "track_timeout_budgets"; - - // Configuration to use different transport sockets for different endpoints. 
- // The entry of *envoy.transport_socket_match* in the - // :ref:`LbEndpoint.Metadata ` - // is used to match against the transport sockets as they appear in the list. The first - // :ref:`match ` is used. - // For example, with the following match - // - // .. code-block:: yaml - // - // transport_socket_matches: - // - name: "enableMTLS" - // match: - // acceptMTLS: true - // transport_socket: - // name: envoy.transport_sockets.tls - // config: { ... } # tls socket configuration - // - name: "defaultToPlaintext" - // match: {} - // transport_socket: - // name: envoy.transport_sockets.raw_buffer - // - // Connections to the endpoints whose metadata value under *envoy.transport_socket_match* - // having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration. - // - // If a :ref:`socket match ` with empty match - // criteria is provided, that always match any endpoint. For example, the "defaultToPlaintext" - // socket match in case above. - // - // If an endpoint metadata's value under *envoy.transport_socket_match* does not match any - // *TransportSocketMatch*, socket configuration fallbacks to use the *tls_context* or - // *transport_socket* specified in this cluster. - // - // This field allows gradual and flexible transport socket configuration changes. - // - // The metadata of endpoints in EDS can indicate transport socket capabilities. For example, - // an endpoint's metadata can have two key value pairs as "acceptMTLS": "true", - // "acceptPlaintext": "true". While some other endpoints, only accepting plaintext traffic - // has "acceptPlaintext": "true" metadata information. - // - // Then the xDS server can configure the CDS to a client, Envoy A, to send mutual TLS - // traffic for endpoints with "acceptMTLS": "true", by adding a corresponding - // *TransportSocketMatch* in this field. Other client Envoys receive CDS without - // *transport_socket_match* set, and still send plain text traffic to the same cluster. 
- // - // This field can be used to specify custom transport socket configurations for health - // checks by adding matching key/value pairs in a health check's - // :ref:`transport socket match criteria ` field. - // - // [#comment:TODO(incfly): add a detailed architecture doc on intended usage.] - repeated TransportSocketMatch transport_socket_matches = 43; - - // Supplies the name of the cluster which must be unique across all clusters. - // The cluster name is used when emitting - // :ref:`statistics ` if :ref:`alt_stat_name - // ` is not provided. - // Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // An optional alternative to the cluster name to be used for observability. This name is used - // emitting stats for the cluster and access logging the cluster name. This will appear as - // additional information in configuration dumps of a cluster's current status as - // :ref:`observability_name ` - // and as an additional tag "upstream_cluster.name" while tracing. Note: access logging using - // this field is presently enabled with runtime feature - // `envoy.reloadable_features.use_observable_cluster_name`. Any ``:`` in the name will be - // converted to ``_`` when emitting statistics. This should not be confused with :ref:`Router - // Filter Header `. - string observability_name = 28; - - oneof cluster_discovery_type { - // The :ref:`service discovery type ` - // to use for resolving the cluster. - DiscoveryType type = 2 [(validate.rules).enum = {defined_only: true}]; - - // The custom cluster type. - CustomClusterType cluster_type = 38; - } - - // Configuration to use for EDS updates for the Cluster. - EdsClusterConfig eds_cluster_config = 3; - - // The timeout for new network connections to hosts in the cluster. - // If not set, a default value of 5s will be used. 
- google.protobuf.Duration connect_timeout = 4 [(validate.rules).duration = {gt {}}]; - - // Soft limit on size of the cluster’s connections read and write buffers. If - // unspecified, an implementation defined default is applied (1MiB). - google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5 - [(udpa.annotations.security).configure_for_untrusted_upstream = true]; - - // The :ref:`load balancer type ` to use - // when picking a host in the cluster. - // [#comment:TODO: Remove enum constraint :ref:`LOAD_BALANCING_POLICY_CONFIG` when implemented.] - LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true not_in: 7}]; - - // Setting this is required for specifying members of - // :ref:`STATIC`, - // :ref:`STRICT_DNS` - // or :ref:`LOGICAL_DNS` clusters. - // This field supersedes the *hosts* field in the v2 API. - // - // .. attention:: - // - // Setting this allows non-EDS cluster types to contain embedded EDS equivalent - // :ref:`endpoint assignments`. - // - endpoint.v3.ClusterLoadAssignment load_assignment = 33; - - // Optional :ref:`active health checking ` - // configuration for the cluster. If no - // configuration is specified no health checking will be done and all cluster - // members will be considered healthy at all times. - repeated core.v4alpha.HealthCheck health_checks = 8; - - // Optional :ref:`circuit breaking ` for the cluster. - CircuitBreakers circuit_breakers = 10; - - // The extension_protocol_options field is used to provide extension-specific protocol options - // for upstream connections. The key should match the extension filter name, such as - // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on - // specific options. - // [#next-major-version: make this a list of typed extensions.] 
- map typed_extension_protocol_options = 36; - - // If the DNS refresh rate is specified and the cluster type is either - // :ref:`STRICT_DNS`, - // or :ref:`LOGICAL_DNS`, - // this value is used as the cluster’s DNS refresh - // rate. The value configured must be at least 1ms. If this setting is not specified, the - // value defaults to 5000ms. For cluster types other than - // :ref:`STRICT_DNS` - // and :ref:`LOGICAL_DNS` - // this setting is ignored. - google.protobuf.Duration dns_refresh_rate = 16 - [(validate.rules).duration = {gt {nanos: 1000000}}]; - - // If the DNS failure refresh rate is specified and the cluster type is either - // :ref:`STRICT_DNS`, - // or :ref:`LOGICAL_DNS`, - // this is used as the cluster’s DNS refresh rate when requests are failing. If this setting is - // not specified, the failure refresh rate defaults to the DNS refresh rate. For cluster types - // other than :ref:`STRICT_DNS` and - // :ref:`LOGICAL_DNS` this setting is - // ignored. - RefreshRate dns_failure_refresh_rate = 44; - - // Optional configuration for setting cluster's DNS refresh rate. If the value is set to true, - // cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS - // resolution. - bool respect_dns_ttl = 39; - - // The DNS IP address resolution policy. If this setting is not specified, the - // value defaults to - // :ref:`AUTO`. - DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum = {defined_only: true}]; - - // DNS resolution configuration which includes the underlying dns resolver addresses and options. - // *dns_resolution_config* will be deprecated once - // :ref:'typed_dns_resolver_config ' - // is fully supported. - core.v4alpha.DnsResolutionConfig dns_resolution_config = 53; - - // DNS resolver type configuration extension. This extension can be used to configure c-ares, apple, - // or any other DNS resolver types and the related parameters. 
- // For example, an object of :ref:`DnsResolutionConfig ` - // can be packed into this *typed_dns_resolver_config*. This configuration will replace the - // :ref:'dns_resolution_config ' - // configuration eventually. - // TODO(yanjunxiang): Investigate the deprecation plan for *dns_resolution_config*. - // During the transition period when both *dns_resolution_config* and *typed_dns_resolver_config* exists, - // this configuration is optional. - // When *typed_dns_resolver_config* is in place, Envoy will use it and ignore *dns_resolution_config*. - // When *typed_dns_resolver_config* is missing, the default behavior is in place. - // [#not-implemented-hide:] - core.v4alpha.TypedExtensionConfig typed_dns_resolver_config = 55; - - // Optional configuration for having cluster readiness block on warm-up. Currently, only applicable for - // :ref:`STRICT_DNS`, - // or :ref:`LOGICAL_DNS`. - // If true, cluster readiness blocks on warm-up. If false, the cluster will complete - // initialization whether or not warm-up has completed. Defaults to true. - google.protobuf.BoolValue wait_for_warm_on_init = 54; - - // If specified, outlier detection will be enabled for this upstream cluster. - // Each of the configuration values can be overridden via - // :ref:`runtime values `. - OutlierDetection outlier_detection = 19; - - // The interval for removing stale hosts from a cluster type - // :ref:`ORIGINAL_DST`. - // Hosts are considered stale if they have not been used - // as upstream destinations during this interval. New hosts are added - // to original destination clusters on demand as new connections are - // redirected to Envoy, causing the number of hosts in the cluster to - // grow over time. Hosts that are not stale (they are actively used as - // destinations) are kept in the cluster, which allows connections to - // them remain open, saving the latency that would otherwise be spent - // on opening new connections. 
If this setting is not specified, the - // value defaults to 5000ms. For cluster types other than - // :ref:`ORIGINAL_DST` - // this setting is ignored. - google.protobuf.Duration cleanup_interval = 20 [(validate.rules).duration = {gt {}}]; - - // Optional configuration used to bind newly established upstream connections. - // This overrides any bind_config specified in the bootstrap proto. - // If the address and port are empty, no bind will be performed. - core.v4alpha.BindConfig upstream_bind_config = 21; - - // Configuration for load balancing subsetting. - LbSubsetConfig lb_subset_config = 22; - - // Optional configuration for the load balancing algorithm selected by - // LbPolicy. Currently only - // :ref:`RING_HASH`, - // :ref:`MAGLEV` and - // :ref:`LEAST_REQUEST` - // has additional configuration options. - // Specifying ring_hash_lb_config or maglev_lb_config or least_request_lb_config without setting the corresponding - // LbPolicy will generate an error at runtime. - oneof lb_config { - // Optional configuration for the Ring Hash load balancing policy. - RingHashLbConfig ring_hash_lb_config = 23; - - // Optional configuration for the Maglev load balancing policy. - MaglevLbConfig maglev_lb_config = 52; - - // Optional configuration for the Original Destination load balancing policy. - OriginalDstLbConfig original_dst_lb_config = 34; - - // Optional configuration for the LeastRequest load balancing policy. - LeastRequestLbConfig least_request_lb_config = 37; - } - - // Common configuration for all load balancer implementations. - CommonLbConfig common_lb_config = 27; - - // Optional custom transport socket implementation to use for upstream connections. - // To setup TLS, set a transport socket with name `envoy.transport_sockets.tls` and - // :ref:`UpstreamTlsContexts ` in the `typed_config`. - // If no transport socket configuration is specified, new connections - // will be set up with plaintext. 
- core.v4alpha.TransportSocket transport_socket = 24; - - // The Metadata field can be used to provide additional information about the - // cluster. It can be used for stats, logging, and varying filter behavior. - // Fields should use reverse DNS notation to denote which entity within Envoy - // will need the information. For instance, if the metadata is intended for - // the Router filter, the filter name should be specified as *envoy.filters.http.router*. - core.v4alpha.Metadata metadata = 25; - - // Optional options for upstream connections. - UpstreamConnectionOptions upstream_connection_options = 30; - - // If an upstream host becomes unhealthy (as determined by the configured health checks - // or outlier detection), immediately close all connections to the failed host. - // - // .. note:: - // - // This is currently only supported for connections created by tcp_proxy. - // - // .. note:: - // - // The current implementation of this feature closes all connections immediately when - // the unhealthy status is detected. If there are a large number of connections open - // to an upstream host that becomes unhealthy, Envoy may spend a substantial amount of - // time exclusively closing these connections, and not processing any other traffic. - bool close_connections_on_host_health_failure = 31; - - // If set to true, Envoy will ignore the health value of a host when processing its removal - // from service discovery. This means that if active health checking is used, Envoy will *not* - // wait for the endpoint to go unhealthy before removing it. - bool ignore_health_on_host_removal = 32; - - // An (optional) network filter chain, listed in the order the filters should be applied. - // The chain will be applied to all outgoing connections that Envoy makes to the upstream - // servers of this cluster. - repeated Filter filters = 40; - - // [#not-implemented-hide:] New mechanism for LB policy configuration. 
Used only if the - // :ref:`lb_policy` field has the value - // :ref:`LOAD_BALANCING_POLICY_CONFIG`. - LoadBalancingPolicy load_balancing_policy = 41; - - // [#not-implemented-hide:] - // If present, tells the client where to send load reports via LRS. If not present, the - // client will fall back to a client-side default, which may be either (a) don't send any - // load reports or (b) send load reports for all clusters to a single default server - // (which may be configured in the bootstrap file). - // - // Note that if multiple clusters point to the same LRS server, the client may choose to - // create a separate stream for each cluster or it may choose to coalesce the data for - // multiple clusters onto a single stream. Either way, the client must make sure to send - // the data for any given cluster on no more than one stream. - // - // [#next-major-version: In the v3 API, we should consider restructuring this somehow, - // maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation - // from the LRS stream here.] - core.v4alpha.ConfigSource lrs_server = 42; - - // Optional customization and configuration of upstream connection pool, and upstream type. - // - // Currently this field only applies for HTTP traffic but is designed for eventual use for custom - // TCP upstreams. - // - // For HTTP traffic, Envoy will generally take downstream HTTP and send it upstream as upstream - // HTTP, using the http connection pool and the codec from `http2_protocol_options` - // - // For routes where CONNECT termination is configured, Envoy will take downstream CONNECT - // requests and forward the CONNECT payload upstream over raw TCP using the tcp connection pool. - // - // The default pool used is the generic connection pool which creates the HTTP upstream for most - // HTTP requests, and the TCP upstream if CONNECT termination is configured. 
- // - // If users desire custom connection pool or upstream behavior, for example terminating - // CONNECT only if a custom filter indicates it is appropriate, the custom factories - // can be registered and configured here. - // [#extension-category: envoy.upstreams] - core.v4alpha.TypedExtensionConfig upstream_config = 48; - - // Configuration to track optional cluster stats. - TrackClusterStats track_cluster_stats = 49; - - // Preconnect configuration for this cluster. - PreconnectPolicy preconnect_policy = 50; - - // If `connection_pool_per_downstream_connection` is true, the cluster will use a separate - // connection pool for every downstream connection - bool connection_pool_per_downstream_connection = 51; -} - -// [#not-implemented-hide:] Extensible load balancing policy configuration. -// -// Every LB policy defined via this mechanism will be identified via a unique name using reverse -// DNS notation. If the policy needs configuration parameters, it must define a message for its -// own configuration, which will be stored in the config field. The name of the policy will tell -// clients which type of message they should expect to see in the config field. -// -// Note that there are cases where it is useful to be able to independently select LB policies -// for choosing a locality and for choosing an endpoint within that locality. For example, a -// given deployment may always use the same policy to choose the locality, but for choosing the -// endpoint within the locality, some clusters may use weighted-round-robin, while others may -// use some sort of session-based balancing. -// -// This can be accomplished via hierarchical LB policies, where the parent LB policy creates a -// child LB policy for each locality. For each request, the parent chooses the locality and then -// delegates to the child policy for that locality to choose the endpoint within the locality. 
-// -// To facilitate this, the config message for the top-level LB policy may include a field of -// type LoadBalancingPolicy that specifies the child policy. -message LoadBalancingPolicy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.LoadBalancingPolicy"; - - message Policy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.LoadBalancingPolicy.Policy"; - - reserved 2; - - reserved "config"; - - // Required. The name of the LB policy. - string name = 1; - - google.protobuf.Any typed_config = 3; - } - - // Each client will iterate over the list in order and stop at the first policy that it - // supports. This provides a mechanism for starting to use new LB policies that are not yet - // supported by all clients. - repeated Policy policies = 1; -} - -// An extensible structure containing the address Envoy should bind to when -// establishing upstream connections. -message UpstreamBindConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.UpstreamBindConfig"; - - // The address Envoy should bind to when establishing upstream connections. - core.v4alpha.Address source_address = 1; -} - -message UpstreamConnectionOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.UpstreamConnectionOptions"; - - // If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives. - core.v4alpha.TcpKeepalive tcp_keepalive = 1; -} - -message TrackClusterStats { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.TrackClusterStats"; - - // If timeout_budgets is true, the :ref:`timeout budget histograms - // ` will be published for each - // request. These show what percentage of a request's per try and global timeout was used. A value - // of 0 would indicate that none of the timeout was used or that the timeout was infinite. 
A value - // of 100 would indicate that the request took the entirety of the timeout given to it. - bool timeout_budgets = 1; - - // If request_response_sizes is true, then the :ref:`histograms - // ` tracking header and body sizes - // of requests and responses will be published. - bool request_response_sizes = 2; -} diff --git a/api/envoy/config/cluster/v4alpha/filter.proto b/api/envoy/config/cluster/v4alpha/filter.proto deleted file mode 100644 index d478fd34f1c79..0000000000000 --- a/api/envoy/config/cluster/v4alpha/filter.proto +++ /dev/null @@ -1,30 +0,0 @@ -syntax = "proto3"; - -package envoy.config.cluster.v4alpha; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.cluster.v4alpha"; -option java_outer_classname = "FilterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Upstream filters] -// Upstream filters apply to the connections to the upstream cluster hosts. - -message Filter { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.cluster.v3.Filter"; - - // The name of the filter to instantiate. The name must match a - // supported upstream filter. Note that Envoy's :ref:`downstream network - // filters ` are not valid upstream filters. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Filter specific configuration which depends on the filter being - // instantiated. See the supported filters for further documentation. 
- google.protobuf.Any typed_config = 2; -} diff --git a/api/envoy/config/cluster/v4alpha/outlier_detection.proto b/api/envoy/config/cluster/v4alpha/outlier_detection.proto deleted file mode 100644 index a64c4b42247fc..0000000000000 --- a/api/envoy/config/cluster/v4alpha/outlier_detection.proto +++ /dev/null @@ -1,157 +0,0 @@ -syntax = "proto3"; - -package envoy.config.cluster.v4alpha; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.cluster.v4alpha"; -option java_outer_classname = "OutlierDetectionProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Outlier detection] - -// See the :ref:`architecture overview ` for -// more information on outlier detection. -// [#next-free-field: 22] -message OutlierDetection { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.OutlierDetection"; - - // The number of consecutive 5xx responses or local origin errors that are mapped - // to 5xx error codes before a consecutive 5xx ejection - // occurs. Defaults to 5. - google.protobuf.UInt32Value consecutive_5xx = 1; - - // The time interval between ejection analysis sweeps. This can result in - // both new ejections as well as hosts being returned to service. Defaults - // to 10000ms or 10s. - google.protobuf.Duration interval = 2 [(validate.rules).duration = {gt {}}]; - - // The base time that a host is ejected for. The real time is equal to the - // base time multiplied by the number of times the host has been ejected and is - // capped by :ref:`max_ejection_time`. - // Defaults to 30000ms or 30s. 
- google.protobuf.Duration base_ejection_time = 3 [(validate.rules).duration = {gt {}}]; - - // The maximum % of an upstream cluster that can be ejected due to outlier - // detection. Defaults to 10% but will eject at least one host regardless of the value. - google.protobuf.UInt32Value max_ejection_percent = 4 [(validate.rules).uint32 = {lte: 100}]; - - // The % chance that a host will be actually ejected when an outlier status - // is detected through consecutive 5xx. This setting can be used to disable - // ejection or to ramp it up slowly. Defaults to 100. - google.protobuf.UInt32Value enforcing_consecutive_5xx = 5 [(validate.rules).uint32 = {lte: 100}]; - - // The % chance that a host will be actually ejected when an outlier status - // is detected through success rate statistics. This setting can be used to - // disable ejection or to ramp it up slowly. Defaults to 100. - google.protobuf.UInt32Value enforcing_success_rate = 6 [(validate.rules).uint32 = {lte: 100}]; - - // The number of hosts in a cluster that must have enough request volume to - // detect success rate outliers. If the number of hosts is less than this - // setting, outlier detection via success rate statistics is not performed - // for any host in the cluster. Defaults to 5. - google.protobuf.UInt32Value success_rate_minimum_hosts = 7; - - // The minimum number of total requests that must be collected in one - // interval (as defined by the interval duration above) to include this host - // in success rate based outlier detection. If the volume is lower than this - // setting, outlier detection via success rate statistics is not performed - // for that host. Defaults to 100. - google.protobuf.UInt32Value success_rate_request_volume = 8; - - // This factor is used to determine the ejection threshold for success rate - // outlier ejection. 
The ejection threshold is the difference between the - // mean success rate, and the product of this factor and the standard - // deviation of the mean success rate: mean - (stdev * - // success_rate_stdev_factor). This factor is divided by a thousand to get a - // double. That is, if the desired factor is 1.9, the runtime value should - // be 1900. Defaults to 1900. - google.protobuf.UInt32Value success_rate_stdev_factor = 9; - - // The number of consecutive gateway failures (502, 503, 504 status codes) - // before a consecutive gateway failure ejection occurs. Defaults to 5. - google.protobuf.UInt32Value consecutive_gateway_failure = 10; - - // The % chance that a host will be actually ejected when an outlier status - // is detected through consecutive gateway failures. This setting can be - // used to disable ejection or to ramp it up slowly. Defaults to 0. - google.protobuf.UInt32Value enforcing_consecutive_gateway_failure = 11 - [(validate.rules).uint32 = {lte: 100}]; - - // Determines whether to distinguish local origin failures from external errors. If set to true - // the following configuration parameters are taken into account: - // :ref:`consecutive_local_origin_failure`, - // :ref:`enforcing_consecutive_local_origin_failure` - // and - // :ref:`enforcing_local_origin_success_rate`. - // Defaults to false. - bool split_external_local_origin_errors = 12; - - // The number of consecutive locally originated failures before ejection - // occurs. Defaults to 5. Parameter takes effect only when - // :ref:`split_external_local_origin_errors` - // is set to true. - google.protobuf.UInt32Value consecutive_local_origin_failure = 13; - - // The % chance that a host will be actually ejected when an outlier status - // is detected through consecutive locally originated failures. This setting can be - // used to disable ejection or to ramp it up slowly. Defaults to 100. 
- // Parameter takes effect only when - // :ref:`split_external_local_origin_errors` - // is set to true. - google.protobuf.UInt32Value enforcing_consecutive_local_origin_failure = 14 - [(validate.rules).uint32 = {lte: 100}]; - - // The % chance that a host will be actually ejected when an outlier status - // is detected through success rate statistics for locally originated errors. - // This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100. - // Parameter takes effect only when - // :ref:`split_external_local_origin_errors` - // is set to true. - google.protobuf.UInt32Value enforcing_local_origin_success_rate = 15 - [(validate.rules).uint32 = {lte: 100}]; - - // The failure percentage to use when determining failure percentage-based outlier detection. If - // the failure percentage of a given host is greater than or equal to this value, it will be - // ejected. Defaults to 85. - google.protobuf.UInt32Value failure_percentage_threshold = 16 - [(validate.rules).uint32 = {lte: 100}]; - - // The % chance that a host will be actually ejected when an outlier status is detected through - // failure percentage statistics. This setting can be used to disable ejection or to ramp it up - // slowly. Defaults to 0. - // - // [#next-major-version: setting this without setting failure_percentage_threshold should be - // invalid in v4.] - google.protobuf.UInt32Value enforcing_failure_percentage = 17 - [(validate.rules).uint32 = {lte: 100}]; - - // The % chance that a host will be actually ejected when an outlier status is detected through - // local-origin failure percentage statistics. This setting can be used to disable ejection or to - // ramp it up slowly. Defaults to 0. - google.protobuf.UInt32Value enforcing_failure_percentage_local_origin = 18 - [(validate.rules).uint32 = {lte: 100}]; - - // The minimum number of hosts in a cluster in order to perform failure percentage-based ejection. 
- // If the total number of hosts in the cluster is less than this value, failure percentage-based - // ejection will not be performed. Defaults to 5. - google.protobuf.UInt32Value failure_percentage_minimum_hosts = 19; - - // The minimum number of total requests that must be collected in one interval (as defined by the - // interval duration above) to perform failure percentage-based ejection for this host. If the - // volume is lower than this setting, failure percentage-based ejection will not be performed for - // this host. Defaults to 50. - google.protobuf.UInt32Value failure_percentage_request_volume = 20; - - // The maximum time that a host is ejected for. See :ref:`base_ejection_time` - // for more information. If not specified, the default value (300000ms or 300s) or - // :ref:`base_ejection_time` value is applied, whatever is larger. - google.protobuf.Duration max_ejection_time = 21 [(validate.rules).duration = {gt {}}]; -} diff --git a/api/envoy/config/common/matcher/v4alpha/BUILD b/api/envoy/config/common/matcher/v4alpha/BUILD deleted file mode 100644 index 8c0f8a2e08d8b..0000000000000 --- a/api/envoy/config/common/matcher/v4alpha/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/common/matcher/v3:pkg", - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/route/v4alpha:pkg", - "//envoy/type/matcher/v4alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/config/common/matcher/v4alpha/matcher.proto b/api/envoy/config/common/matcher/v4alpha/matcher.proto deleted file mode 100644 index 2027331b31da3..0000000000000 --- a/api/envoy/config/common/matcher/v4alpha/matcher.proto +++ /dev/null @@ -1,269 +0,0 @@ -syntax = "proto3"; - -package envoy.config.common.matcher.v4alpha; - -import "envoy/config/core/v4alpha/extension.proto"; -import "envoy/config/route/v4alpha/route_components.proto"; -import "envoy/type/matcher/v4alpha/string.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.common.matcher.v4alpha"; -option java_outer_classname = "MatcherProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Unified Matcher API] - -// A matcher, which may traverse a matching tree in order to result in a match action. -// During matching, the tree will be traversed until a match is found, or if no match -// is found the action specified by the most specific on_no_match will be evaluated. -// As an on_no_match might result in another matching tree being evaluated, this process -// might repeat several times until the final OnMatch (or no match) is decided. -// -// [#alpha:] -message Matcher { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.matcher.v3.Matcher"; - - // What to do if a match is successful. 
- message OnMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.matcher.v3.Matcher.OnMatch"; - - oneof on_match { - option (validate.required) = true; - - // Nested matcher to evaluate. - // If the nested matcher does not match and does not specify - // on_no_match, then this matcher is considered not to have - // matched, even if a predicate at this level or above returned - // true. - Matcher matcher = 1; - - // Protocol-specific action to take. - core.v4alpha.TypedExtensionConfig action = 2; - } - } - - // A linear list of field matchers. - // The field matchers are evaluated in order, and the first match - // wins. - message MatcherList { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.matcher.v3.Matcher.MatcherList"; - - // Predicate to determine if a match is successful. - message Predicate { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate"; - - // Predicate for a single input field. - message SinglePredicate { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate.SinglePredicate"; - - // Protocol-specific specification of input field to match on. - // [#extension-category: envoy.matching.common_inputs] - core.v4alpha.TypedExtensionConfig input = 1 [(validate.rules).message = {required: true}]; - - oneof matcher { - option (validate.required) = true; - - // Built-in string matcher. - type.matcher.v4alpha.StringMatcher value_match = 2; - - // Extension for custom matching logic. - // [#extension-category: envoy.matching.input_matchers] - core.v4alpha.TypedExtensionConfig custom_match = 3; - } - } - - // A list of two or more matchers. Used to allow using a list within a oneof. 
- message PredicateList { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate.PredicateList"; - - repeated Predicate predicate = 1 [(validate.rules).repeated = {min_items: 2}]; - } - - oneof match_type { - option (validate.required) = true; - - // A single predicate to evaluate. - SinglePredicate single_predicate = 1; - - // A list of predicates to be OR-ed together. - PredicateList or_matcher = 2; - - // A list of predicates to be AND-ed together. - PredicateList and_matcher = 3; - - // The invert of a predicate - Predicate not_matcher = 4; - } - } - - // An individual matcher. - message FieldMatcher { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.matcher.v3.Matcher.MatcherList.FieldMatcher"; - - // Determines if the match succeeds. - Predicate predicate = 1 [(validate.rules).message = {required: true}]; - - // What to do if the match succeeds. - OnMatch on_match = 2 [(validate.rules).message = {required: true}]; - } - - // A list of matchers. First match wins. - repeated FieldMatcher matchers = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - message MatcherTree { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.matcher.v3.Matcher.MatcherTree"; - - // A map of configured matchers. Used to allow using a map within a oneof. - message MatchMap { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.matcher.v3.Matcher.MatcherTree.MatchMap"; - - map map = 1 [(validate.rules).map = {min_pairs: 1}]; - } - - // Protocol-specific specification of input field to match on. - core.v4alpha.TypedExtensionConfig input = 1 [(validate.rules).message = {required: true}]; - - // Exact or prefix match maps in which to look up the input value. - // If the lookup succeeds, the match is considered successful, and - // the corresponding OnMatch is used. 
- oneof tree_type { - option (validate.required) = true; - - MatchMap exact_match_map = 2; - - // Longest matching prefix wins. - MatchMap prefix_match_map = 3; - - // Extension for custom matching logic. - core.v4alpha.TypedExtensionConfig custom_match = 4; - } - } - - oneof matcher_type { - option (validate.required) = true; - - // A linear list of matchers to evaluate. - MatcherList matcher_list = 1; - - // A match tree to evaluate. - MatcherTree matcher_tree = 2; - } - - // Optional OnMatch to use if the matcher failed. - // If specified, the OnMatch is used, and the matcher is considered - // to have matched. - // If not specified, the matcher is considered not to have matched. - OnMatch on_no_match = 3; -} - -// Match configuration. This is a recursive structure which allows complex nested match -// configurations to be built using various logical operators. -// [#next-free-field: 11] -message MatchPredicate { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.matcher.v3.MatchPredicate"; - - // A set of match configurations used for logical operations. - message MatchSet { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.matcher.v3.MatchPredicate.MatchSet"; - - // The list of rules that make up the set. - repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}]; - } - - oneof rule { - option (validate.required) = true; - - // A set that describes a logical OR. If any member of the set matches, the match configuration - // matches. - MatchSet or_match = 1; - - // A set that describes a logical AND. If all members of the set match, the match configuration - // matches. - MatchSet and_match = 2; - - // A negation match. The match configuration will match if the negated match condition matches. - MatchPredicate not_match = 3; - - // The match configuration will always match. 
- bool any_match = 4 [(validate.rules).bool = {const: true}]; - - // HTTP request headers match configuration. - HttpHeadersMatch http_request_headers_match = 5; - - // HTTP request trailers match configuration. - HttpHeadersMatch http_request_trailers_match = 6; - - // HTTP response headers match configuration. - HttpHeadersMatch http_response_headers_match = 7; - - // HTTP response trailers match configuration. - HttpHeadersMatch http_response_trailers_match = 8; - - // HTTP request generic body match configuration. - HttpGenericBodyMatch http_request_generic_body_match = 9; - - // HTTP response generic body match configuration. - HttpGenericBodyMatch http_response_generic_body_match = 10; - } -} - -// HTTP headers match configuration. -message HttpHeadersMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.matcher.v3.HttpHeadersMatch"; - - // HTTP headers to match. - repeated route.v4alpha.HeaderMatcher headers = 1; -} - -// HTTP generic body match configuration. -// List of text strings and hex strings to be located in HTTP body. -// All specified strings must be found in the HTTP body for positive match. -// The search may be limited to specified number of bytes from the body start. -// -// .. attention:: -// -// Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match. -// If multiple patterns are specified, the process is repeated for each pattern. If location of a pattern is known, ``bytes_limit`` should be specified -// to scan only part of the http body. 
-message HttpGenericBodyMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.matcher.v3.HttpGenericBodyMatch"; - - message GenericTextMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.matcher.v3.HttpGenericBodyMatch.GenericTextMatch"; - - oneof rule { - option (validate.required) = true; - - // Text string to be located in HTTP body. - string string_match = 1 [(validate.rules).string = {min_len: 1}]; - - // Sequence of bytes to be located in HTTP body. - bytes binary_match = 2 [(validate.rules).bytes = {min_len: 1}]; - } - } - - // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer). - uint32 bytes_limit = 1; - - // List of patterns to match. - repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}]; -} diff --git a/api/envoy/config/core/v3/protocol.proto b/api/envoy/config/core/v3/protocol.proto index 6589a3ed3a1a4..8f2347eb55179 100644 --- a/api/envoy/config/core/v3/protocol.proto +++ b/api/envoy/config/core/v3/protocol.proto @@ -73,7 +73,7 @@ message UpstreamHttpProtocolOptions { // Configures the alternate protocols cache which tracks alternate protocols that can be used to // make an HTTP connection to an origin server. See https://tools.ietf.org/html/rfc7838 for -// HTTP Alternate Services and https://datatracker.ietf.org/doc/html/draft-ietf-dnsop-svcb-https-04 +// HTTP Alternative Services and https://datatracker.ietf.org/doc/html/draft-ietf-dnsop-svcb-https-04 // for the "HTTPS" DNS resource record. message AlternateProtocolsCacheOptions { // The name of the cache. Multiple named caches allow independent alternate protocols cache diff --git a/api/envoy/config/core/v4alpha/BUILD b/api/envoy/config/core/v4alpha/BUILD deleted file mode 100644 index c9e435fda9a99..0000000000000 --- a/api/envoy/config/core/v4alpha/BUILD +++ /dev/null @@ -1,16 +0,0 @@ -# DO NOT EDIT. 
This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/type/matcher/v4alpha:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@com_github_cncf_udpa//xds/core/v3:pkg", - ], -) diff --git a/api/envoy/config/core/v4alpha/address.proto b/api/envoy/config/core/v4alpha/address.proto deleted file mode 100644 index 63d4d4a145075..0000000000000 --- a/api/envoy/config/core/v4alpha/address.proto +++ /dev/null @@ -1,163 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v4alpha; - -import "envoy/config/core/v4alpha/socket_option.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; -option java_outer_classname = "AddressProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Network addresses] - -message Pipe { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Pipe"; - - // Unix Domain Socket path. On Linux, paths starting with '@' will use the - // abstract namespace. The starting '@' is replaced by a null byte by Envoy. - // Paths starting with '@' will result in an error in environments other than - // Linux. - string path = 1 [(validate.rules).string = {min_len: 1}]; - - // The mode for the Pipe. Not applicable for abstract sockets. - uint32 mode = 2 [(validate.rules).uint32 = {lte: 511}]; -} - -// [#not-implemented-hide:] The address represents an envoy internal listener. -// TODO(lambdai): Make this address available for listener and endpoint. 
-// TODO(asraa): When address available, remove workaround from test/server/server_fuzz_test.cc:30. -message EnvoyInternalAddress { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.EnvoyInternalAddress"; - - oneof address_name_specifier { - option (validate.required) = true; - - // [#not-implemented-hide:] The :ref:`listener name ` of the destination internal listener. - string server_listener_name = 1; - } -} - -// [#next-free-field: 7] -message SocketAddress { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.SocketAddress"; - - enum Protocol { - TCP = 0; - UDP = 1; - } - - Protocol protocol = 1 [(validate.rules).enum = {defined_only: true}]; - - // The address for this socket. :ref:`Listeners ` will bind - // to the address. An empty address is not allowed. Specify ``0.0.0.0`` or ``::`` - // to bind to any address. [#comment:TODO(zuercher) reinstate when implemented: - // It is possible to distinguish a Listener address via the prefix/suffix matching - // in :ref:`FilterChainMatch `.] When used - // within an upstream :ref:`BindConfig `, the address - // controls the source address of outbound connections. For :ref:`clusters - // `, the cluster type determines whether the - // address must be an IP (*STATIC* or *EDS* clusters) or a hostname resolved by DNS - // (*STRICT_DNS* or *LOGICAL_DNS* clusters). Address resolution can be customized - // via :ref:`resolver_name `. - string address = 2 [(validate.rules).string = {min_len: 1}]; - - oneof port_specifier { - option (validate.required) = true; - - uint32 port_value = 3 [(validate.rules).uint32 = {lte: 65535}]; - - // This is only valid if :ref:`resolver_name - // ` is specified below and the - // named resolver is capable of named port resolution. - string named_port = 4; - } - - // The name of the custom resolver. This must have been registered with Envoy. If - // this is empty, a context dependent default applies. 
If the address is a concrete - // IP address, no resolution will occur. If address is a hostname this - // should be set for resolution other than DNS. Specifying a custom resolver with - // *STRICT_DNS* or *LOGICAL_DNS* will generate an error at runtime. - string resolver_name = 5; - - // When binding to an IPv6 address above, this enables `IPv4 compatibility - // `_. Binding to ``::`` will - // allow both IPv4 and IPv6 connections, with peer IPv4 addresses mapped into - // IPv6 space as ``::FFFF:``. - bool ipv4_compat = 6; -} - -message TcpKeepalive { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.TcpKeepalive"; - - // Maximum number of keepalive probes to send without response before deciding - // the connection is dead. Default is to use the OS level configuration (unless - // overridden, Linux defaults to 9.) - google.protobuf.UInt32Value keepalive_probes = 1; - - // The number of seconds a connection needs to be idle before keep-alive probes - // start being sent. Default is to use the OS level configuration (unless - // overridden, Linux defaults to 7200s (i.e., 2 hours.) - google.protobuf.UInt32Value keepalive_time = 2; - - // The number of seconds between keep-alive probes. Default is to use the OS - // level configuration (unless overridden, Linux defaults to 75s.) - google.protobuf.UInt32Value keepalive_interval = 3; -} - -message BindConfig { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.BindConfig"; - - // The address to bind to when creating a socket. - SocketAddress source_address = 1 [(validate.rules).message = {required: true}]; - - // Whether to set the *IP_FREEBIND* option when creating the socket. When this - // flag is set to true, allows the :ref:`source_address - // ` to be an IP address - // that is not configured on the system running Envoy. When this flag is set - // to false, the option *IP_FREEBIND* is disabled on the socket. 
When this - // flag is not set (default), the socket is not modified, i.e. the option is - // neither enabled nor disabled. - google.protobuf.BoolValue freebind = 2; - - // Additional socket options that may not be present in Envoy source code or - // precompiled binaries. - repeated SocketOption socket_options = 3; -} - -// Addresses specify either a logical or physical address and port, which are -// used to tell Envoy where to bind/listen, connect to upstream and find -// management servers. -message Address { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Address"; - - oneof address { - option (validate.required) = true; - - SocketAddress socket_address = 1; - - Pipe pipe = 2; - - // [#not-implemented-hide:] - EnvoyInternalAddress envoy_internal_address = 3; - } -} - -// CidrRange specifies an IP Address and a prefix length to construct -// the subnet mask for a `CIDR `_ range. -message CidrRange { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.CidrRange"; - - // IPv4 or IPv6 address, e.g. ``192.0.0.0`` or ``2001:db8::``. - string address_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // Length of prefix, e.g. 0, 32. Defaults to 0 when unset. 
- google.protobuf.UInt32Value prefix_len = 2 [(validate.rules).uint32 = {lte: 128}]; -} diff --git a/api/envoy/config/core/v4alpha/backoff.proto b/api/envoy/config/core/v4alpha/backoff.proto deleted file mode 100644 index 266d57f84e74a..0000000000000 --- a/api/envoy/config/core/v4alpha/backoff.proto +++ /dev/null @@ -1,37 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v4alpha; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; -option java_outer_classname = "BackoffProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Backoff Strategy] - -// Configuration defining a jittered exponential back off strategy. -message BackoffStrategy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.BackoffStrategy"; - - // The base interval to be used for the next back off computation. It should - // be greater than zero and less than or equal to :ref:`max_interval - // `. - google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { - required: true - gte {nanos: 1000000} - }]; - - // Specifies the maximum interval between retries. This parameter is optional, - // but must be greater than or equal to the :ref:`base_interval - // ` if set. The default - // is 10 times the :ref:`base_interval - // `. 
- google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}]; -} diff --git a/api/envoy/config/core/v4alpha/base.proto b/api/envoy/config/core/v4alpha/base.proto deleted file mode 100644 index b9980eff49ca5..0000000000000 --- a/api/envoy/config/core/v4alpha/base.proto +++ /dev/null @@ -1,456 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v4alpha; - -import "envoy/config/core/v4alpha/backoff.proto"; -import "envoy/config/core/v4alpha/http_uri.proto"; -import "envoy/type/v3/percent.proto"; -import "envoy/type/v3/semantic_version.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "xds/core/v3/context_params.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; -option java_outer_classname = "BaseProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Common types] - -// Envoy supports :ref:`upstream priority routing -// ` both at the route and the virtual -// cluster level. The current priority implementation uses different connection -// pool and circuit breaking settings for each priority level. This means that -// even for HTTP/2 requests, two physical connections will be used to an -// upstream host. In the future Envoy will likely support true HTTP/2 priority -// over a single upstream connection. -enum RoutingPriority { - DEFAULT = 0; - HIGH = 1; -} - -// HTTP request method. -enum RequestMethod { - METHOD_UNSPECIFIED = 0; - GET = 1; - HEAD = 2; - POST = 3; - PUT = 4; - DELETE = 5; - CONNECT = 6; - OPTIONS = 7; - TRACE = 8; - PATCH = 9; -} - -// Identifies the direction of the traffic relative to the local Envoy. -enum TrafficDirection { - // Default option is unspecified. 
- UNSPECIFIED = 0; - - // The transport is used for incoming traffic. - INBOUND = 1; - - // The transport is used for outgoing traffic. - OUTBOUND = 2; -} - -// Identifies location of where either Envoy runs or where upstream hosts run. -message Locality { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Locality"; - - // Region this :ref:`zone ` belongs to. - string region = 1; - - // Defines the local service zone where Envoy is running. Though optional, it - // should be set if discovery service routing is used and the discovery - // service exposes :ref:`zone data `, - // either in this message or via :option:`--service-zone`. The meaning of zone - // is context dependent, e.g. `Availability Zone (AZ) - // `_ - // on AWS, `Zone `_ on - // GCP, etc. - string zone = 2; - - // When used for locality of upstream hosts, this field further splits zone - // into smaller chunks of sub-zones so they can be load balanced - // independently. - string sub_zone = 3; -} - -// BuildVersion combines SemVer version of extension with free-form build information -// (i.e. 'alpha', 'private-build') as a set of strings. -message BuildVersion { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.BuildVersion"; - - // SemVer version of extension. - type.v3.SemanticVersion version = 1; - - // Free-form build information. - // Envoy defines several well known keys in the source/common/version/version.h file - google.protobuf.Struct metadata = 2; -} - -// Version and identification for an Envoy extension. -// [#next-free-field: 6] -message Extension { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Extension"; - - // This is the name of the Envoy filter as specified in the Envoy - // configuration, e.g. envoy.filters.http.router, com.acme.widget. - string name = 1; - - // Category of the extension. - // Extension category names use reverse DNS notation. 
For instance "envoy.filters.listener" - // for Envoy's built-in listener filters or "com.acme.filters.http" for HTTP filters from - // acme.com vendor. - // [#comment:TODO(yanavlasov): Link to the doc with existing envoy category names.] - string category = 2; - - // [#not-implemented-hide:] Type descriptor of extension configuration proto. - // [#comment:TODO(yanavlasov): Link to the doc with existing configuration protos.] - // [#comment:TODO(yanavlasov): Add tests when PR #9391 lands.] - string type_descriptor = 3; - - // The version is a property of the extension and maintained independently - // of other extensions and the Envoy API. - // This field is not set when extension did not provide version information. - BuildVersion version = 4; - - // Indicates that the extension is present but was disabled via dynamic configuration. - bool disabled = 5; -} - -// Identifies a specific Envoy instance. The node identifier is presented to the -// management server, which may use this identifier to distinguish per Envoy -// configuration for serving. -// [#next-free-field: 13] -message Node { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Node"; - - reserved 5, 11; - - reserved "build_version", "listening_addresses"; - - // An opaque node identifier for the Envoy node. This also provides the local - // service node name. It should be set if any of the following features are - // used: :ref:`statsd `, :ref:`CDS - // `, and :ref:`HTTP tracing - // `, either in this message or via - // :option:`--service-node`. - string id = 1; - - // Defines the local service cluster name where Envoy is running. 
Though - // optional, it should be set if any of the following features are used: - // :ref:`statsd `, :ref:`health check cluster - // verification - // `, - // :ref:`runtime override directory `, - // :ref:`user agent addition - // `, - // :ref:`HTTP global rate limiting `, - // :ref:`CDS `, and :ref:`HTTP tracing - // `, either in this message or via - // :option:`--service-cluster`. - string cluster = 2; - - // Opaque metadata extending the node identifier. Envoy will pass this - // directly to the management server. - google.protobuf.Struct metadata = 3; - - // Map from xDS resource type URL to dynamic context parameters. These may vary at runtime (unlike - // other fields in this message). For example, the xDS client may have a shard identifier that - // changes during the lifetime of the xDS client. In Envoy, this would be achieved by updating the - // dynamic context on the Server::Instance's LocalInfo context provider. The shard ID dynamic - // parameter then appears in this field during future discovery requests. - map dynamic_parameters = 12; - - // Locality specifying where the Envoy instance is running. - Locality locality = 4; - - // Free-form string that identifies the entity requesting config. - // E.g. "envoy" or "grpc" - string user_agent_name = 6; - - oneof user_agent_version_type { - // Free-form string that identifies the version of the entity requesting config. - // E.g. "1.12.2" or "abcd1234", or "SpecialEnvoyBuild" - string user_agent_version = 7; - - // Structured version of the entity requesting config. - BuildVersion user_agent_build_version = 8; - } - - // List of extensions and their versions supported by the node. - repeated Extension extensions = 9; - - // Client feature support list. These are well known features described - // in the Envoy API repository for a given major version of an API. Client features - // use reverse DNS naming scheme, for example `com.acme.feature`. 
- // See :ref:`the list of features ` that xDS client may - // support. - repeated string client_features = 10; -} - -// Metadata provides additional inputs to filters based on matched listeners, -// filter chains, routes and endpoints. It is structured as a map, usually from -// filter name (in reverse DNS format) to metadata specific to the filter. Metadata -// key-values for a filter are merged as connection and request handling occurs, -// with later values for the same key overriding earlier values. -// -// An example use of metadata is providing additional values to -// http_connection_manager in the envoy.http_connection_manager.access_log -// namespace. -// -// Another example use of metadata is to per service config info in cluster metadata, which may get -// consumed by multiple filters. -// -// For load balancing, Metadata provides a means to subset cluster endpoints. -// Endpoints have a Metadata object associated and routes contain a Metadata -// object to match against. There are some well defined metadata used today for -// this purpose: -// -// * ``{"envoy.lb": {"canary": }}`` This indicates the canary status of an -// endpoint and is also used during header processing -// (x-envoy-upstream-canary) and for stats purposes. -// [#next-major-version: move to type/metadata/v2] -message Metadata { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Metadata"; - - // Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.* - // namespace is reserved for Envoy's built-in filters. - // If both *filter_metadata* and - // :ref:`typed_filter_metadata ` - // fields are present in the metadata with same keys, - // only *typed_filter_metadata* field will be parsed. - map filter_metadata = 1; - - // Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.* - // namespace is reserved for Envoy's built-in filters. - // The value is encoded as google.protobuf.Any. 
- // If both :ref:`filter_metadata ` - // and *typed_filter_metadata* fields are present in the metadata with same keys, - // only *typed_filter_metadata* field will be parsed. - map typed_filter_metadata = 2; -} - -// Runtime derived uint32 with a default when not specified. -message RuntimeUInt32 { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.RuntimeUInt32"; - - // Default value if runtime value is not available. - uint32 default_value = 2; - - // Runtime key to get value for comparison. This value is used if defined. - string runtime_key = 3 [(validate.rules).string = {min_len: 1}]; -} - -// Runtime derived percentage with a default when not specified. -message RuntimePercent { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.RuntimePercent"; - - // Default value if runtime value is not available. - type.v3.Percent default_value = 1; - - // Runtime key to get value for comparison. This value is used if defined. - string runtime_key = 2 [(validate.rules).string = {min_len: 1}]; -} - -// Runtime derived double with a default when not specified. -message RuntimeDouble { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.RuntimeDouble"; - - // Default value if runtime value is not available. - double default_value = 1; - - // Runtime key to get value for comparison. This value is used if defined. - string runtime_key = 2 [(validate.rules).string = {min_len: 1}]; -} - -// Runtime derived bool with a default when not specified. -message RuntimeFeatureFlag { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.RuntimeFeatureFlag"; - - // Default value if runtime value is not available. - google.protobuf.BoolValue default_value = 1 [(validate.rules).message = {required: true}]; - - // Runtime key to get value for comparison. This value is used if defined. 
The boolean value must - // be represented via its - // `canonical JSON encoding `_. - string runtime_key = 2 [(validate.rules).string = {min_len: 1}]; -} - -// Header name/value pair. -message HeaderValue { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HeaderValue"; - - // Header name. - string key = 1 - [(validate.rules).string = - {min_len: 1 max_bytes: 16384 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // Header value. - // - // The same :ref:`format specifier ` as used for - // :ref:`HTTP access logging ` applies here, however - // unknown header values are replaced with the empty string instead of `-`. - string value = 2 [ - (validate.rules).string = {max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false} - ]; -} - -// Header name/value pair plus option to control append behavior. -message HeaderValueOption { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.HeaderValueOption"; - - // Header name/value pair that this option applies to. - HeaderValue header = 1 [(validate.rules).message = {required: true}]; - - // Should the value be appended? If true (default), the value is appended to - // existing values. Otherwise it replaces any existing values. - google.protobuf.BoolValue append = 2; -} - -// Wrapper for a set of headers. -message HeaderMap { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HeaderMap"; - - repeated HeaderValue headers = 1; -} - -// A directory that is watched for changes, e.g. by inotify on Linux. Move/rename -// events inside this directory trigger the watch. -message WatchedDirectory { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.WatchedDirectory"; - - // Directory path to watch. - string path = 1 [(validate.rules).string = {min_len: 1}]; -} - -// Data source consisting of either a file or an inline value. 
-message DataSource { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.DataSource"; - - oneof specifier { - option (validate.required) = true; - - // Local filesystem data source. - string filename = 1 [(validate.rules).string = {min_len: 1}]; - - // Bytes inlined in the configuration. - bytes inline_bytes = 2; - - // String inlined in the configuration. - string inline_string = 3; - } -} - -// The message specifies the retry policy of remote data source when fetching fails. -message RetryPolicy { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.RetryPolicy"; - - // Specifies parameters that control :ref:`retry backoff strategy `. - // This parameter is optional, in which case the default base interval is 1000 milliseconds. The - // default maximum interval is 10 times the base interval. - BackoffStrategy retry_back_off = 1; - - // Specifies the allowed number of retries. This parameter is optional and - // defaults to 1. - google.protobuf.UInt32Value max_retries = 2; -} - -// The message specifies how to fetch data from remote and how to verify it. -message RemoteDataSource { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.RemoteDataSource"; - - // The HTTP URI to fetch the remote data. - HttpUri http_uri = 1 [(validate.rules).message = {required: true}]; - - // SHA256 string for verifying data. - string sha256 = 2 [(validate.rules).string = {min_len: 1}]; - - // Retry policy for fetching remote data. - RetryPolicy retry_policy = 3; -} - -// Async data source which support async data fetch. -message AsyncDataSource { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.AsyncDataSource"; - - oneof specifier { - option (validate.required) = true; - - // Local async data source. - DataSource local = 1; - - // Remote async data source. 
- RemoteDataSource remote = 2; - } -} - -// Configuration for transport socket in :ref:`listeners ` and -// :ref:`clusters `. If the configuration is -// empty, a default transport socket implementation and configuration will be -// chosen based on the platform and existence of tls_context. -message TransportSocket { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.TransportSocket"; - - reserved 2; - - reserved "config"; - - // The name of the transport socket to instantiate. The name must match a supported transport - // socket implementation. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Implementation specific configuration which depends on the implementation being instantiated. - // See the supported transport socket implementations for further documentation. - oneof config_type { - google.protobuf.Any typed_config = 3; - } -} - -// Runtime derived FractionalPercent with defaults for when the numerator or denominator is not -// specified via a runtime key. -// -// .. note:: -// -// Parsing of the runtime key's data is implemented such that it may be represented as a -// :ref:`FractionalPercent ` proto represented as JSON/YAML -// and may also be represented as an integer with the assumption that the value is an integral -// percentage out of 100. For instance, a runtime key lookup returning the value "42" would parse -// as a `FractionalPercent` whose numerator is 42 and denominator is HUNDRED. -message RuntimeFractionalPercent { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.RuntimeFractionalPercent"; - - // Default value if the runtime value's for the numerator/denominator keys are not available. - type.v3.FractionalPercent default_value = 1 [(validate.rules).message = {required: true}]; - - // Runtime key for a YAML representation of a FractionalPercent. - string runtime_key = 2; -} - -// Identifies a specific ControlPlane instance that Envoy is connected to. 
-message ControlPlane { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.ControlPlane"; - - // An opaque control plane identifier that uniquely identifies an instance - // of control plane. This can be used to identify which control plane instance, - // the Envoy is connected to. - string identifier = 1; -} diff --git a/api/envoy/config/core/v4alpha/config_source.proto b/api/envoy/config/core/v4alpha/config_source.proto deleted file mode 100644 index 54b4824315015..0000000000000 --- a/api/envoy/config/core/v4alpha/config_source.proto +++ /dev/null @@ -1,217 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v4alpha; - -import "envoy/config/core/v4alpha/grpc_service.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "xds/core/v3/authority.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; -option java_outer_classname = "ConfigSourceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Configuration sources] - -// xDS API and non-xDS services version. This is used to describe both resource and transport -// protocol versions (in distinct configuration fields). -enum ApiVersion { - reserved 1; - - reserved "V2"; - - // When not specified, we assume v2, to ease migration to Envoy's stable API - // versioning. If a client does not support v2 (e.g. due to deprecation), this - // is an invalid value. - DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE = 0 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version_enum) = "3.0"]; - - // Use xDS v3 API. - V3 = 2; -} - -// API configuration source. This identifies the API type and cluster that Envoy -// will use to fetch an xDS API. 
-// [#next-free-field: 9] -message ApiConfigSource { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.ApiConfigSource"; - - // APIs may be fetched via either REST or gRPC. - enum ApiType { - // Ideally this would be 'reserved 0' but one can't reserve the default - // value. Instead we throw an exception if this is ever used. - DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE = 0 - [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true]; - - // REST-JSON v2 API. The `canonical JSON encoding - // `_ for - // the v2 protos is used. - REST = 1; - - // SotW gRPC service. - GRPC = 2; - - // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} - // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state - // with every update, the xDS server only sends what has changed since the last update. - DELTA_GRPC = 3; - - // SotW xDS gRPC with ADS. All resources which resolve to this configuration source will be - // multiplexed on a single connection to an ADS endpoint. - // [#not-implemented-hide:] - AGGREGATED_GRPC = 5; - - // Delta xDS gRPC with ADS. All resources which resolve to this configuration source will be - // multiplexed on a single connection to an ADS endpoint. - // [#not-implemented-hide:] - AGGREGATED_DELTA_GRPC = 6; - } - - // API type (gRPC, REST, delta gRPC) - ApiType api_type = 1 [(validate.rules).enum = {defined_only: true}]; - - // API version for xDS transport protocol. This describes the xDS gRPC/REST - // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. - ApiVersion transport_api_version = 8 [(validate.rules).enum = {defined_only: true}]; - - // Cluster names should be used only with REST. If > 1 - // cluster is defined, clusters will be cycled through if any kind of failure - // occurs. - // - // .. note:: - // - // The cluster with name ``cluster_name`` must be statically defined and its - // type must not be ``EDS``. 
- repeated string cluster_names = 2; - - // Multiple gRPC services be provided for GRPC. If > 1 cluster is defined, - // services will be cycled through if any kind of failure occurs. - repeated GrpcService grpc_services = 4; - - // For REST APIs, the delay between successive polls. - google.protobuf.Duration refresh_delay = 3; - - // For REST APIs, the request timeout. If not set, a default value of 1s will be used. - google.protobuf.Duration request_timeout = 5 [(validate.rules).duration = {gt {}}]; - - // For GRPC APIs, the rate limit settings. If present, discovery requests made by Envoy will be - // rate limited. - RateLimitSettings rate_limit_settings = 6; - - // Skip the node identifier in subsequent discovery requests for streaming gRPC config types. - bool set_node_on_first_message_only = 7; -} - -// Aggregated Discovery Service (ADS) options. This is currently empty, but when -// set in :ref:`ConfigSource ` can be used to -// specify that ADS is to be used. -message AggregatedConfigSource { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.AggregatedConfigSource"; -} - -// [#not-implemented-hide:] -// Self-referencing config source options. This is currently empty, but when -// set in :ref:`ConfigSource ` can be used to -// specify that other data can be obtained from the same server. -message SelfConfigSource { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.SelfConfigSource"; - - // API version for xDS transport protocol. This describes the xDS gRPC/REST - // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. - ApiVersion transport_api_version = 1 [(validate.rules).enum = {defined_only: true}]; -} - -// Rate Limit settings to be applied for discovery requests made by Envoy. 
-message RateLimitSettings { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.RateLimitSettings"; - - // Maximum number of tokens to be used for rate limiting discovery request calls. If not set, a - // default value of 100 will be used. - google.protobuf.UInt32Value max_tokens = 1; - - // Rate at which tokens will be filled per second. If not set, a default fill rate of 10 tokens - // per second will be used. - google.protobuf.DoubleValue fill_rate = 2 [(validate.rules).double = {gt: 0.0}]; -} - -// Configuration for :ref:`listeners `, :ref:`clusters -// `, :ref:`routes -// `, :ref:`endpoints -// ` etc. may either be sourced from the -// filesystem or from an xDS API source. Filesystem configs are watched with -// inotify for updates. -// [#next-free-field: 8] -message ConfigSource { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.ConfigSource"; - - // Authorities that this config source may be used for. An authority specified in a xdstp:// URL - // is resolved to a *ConfigSource* prior to configuration fetch. This field provides the - // association between authority name and configuration source. - // [#not-implemented-hide:] - repeated xds.core.v3.Authority authorities = 7; - - oneof config_source_specifier { - option (validate.required) = true; - - // Path on the filesystem to source and watch for configuration updates. - // When sourcing configuration for :ref:`secret `, - // the certificate and key files are also watched for updates. - // - // .. note:: - // - // The path to the source must exist at config load time. - // - // .. note:: - // - // Envoy will only watch the file path for *moves.* This is because in general only moves - // are atomic. The same method of swapping files as is demonstrated in the - // :ref:`runtime documentation ` can be used here also. - string path = 1; - - // API configuration source. 
- ApiConfigSource api_config_source = 2; - - // When set, ADS will be used to fetch resources. The ADS API configuration - // source in the bootstrap configuration is used. - AggregatedConfigSource ads = 3; - - // [#not-implemented-hide:] - // When set, the client will access the resources from the same server it got the - // ConfigSource from, although not necessarily from the same stream. This is similar to the - // :ref:`ads` field, except that the client may use a - // different stream to the same server. As a result, this field can be used for things - // like LRS that cannot be sent on an ADS stream. It can also be used to link from (e.g.) - // LDS to RDS on the same server without requiring the management server to know its name - // or required credentials. - // [#next-major-version: In xDS v3, consider replacing the ads field with this one, since - // this field can implicitly mean to use the same stream in the case where the ConfigSource - // is provided via ADS and the specified data can also be obtained via ADS.] - SelfConfigSource self = 5; - } - - // When this timeout is specified, Envoy will wait no longer than the specified time for first - // config response on this xDS subscription during the :ref:`initialization process - // `. After reaching the timeout, Envoy will move to the next - // initialization phase, even if the first config is not delivered yet. The timer is activated - // when the xDS API subscription starts, and is disarmed on first config update or on error. 0 - // means no timeout - Envoy will wait indefinitely for the first xDS config (unless another - // timeout applies). The default is 15s. - google.protobuf.Duration initial_fetch_timeout = 4; - - // API version for xDS resources. This implies the type URLs that the client - // will request for resources and the resource type that the client will in - // turn expect to be delivered. 
- ApiVersion resource_api_version = 6 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/api/envoy/config/core/v4alpha/event_service_config.proto b/api/envoy/config/core/v4alpha/event_service_config.proto deleted file mode 100644 index a0b4e5590d1d5..0000000000000 --- a/api/envoy/config/core/v4alpha/event_service_config.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v4alpha; - -import "envoy/config/core/v4alpha/grpc_service.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; -option java_outer_classname = "EventServiceConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#not-implemented-hide:] -// Configuration of the event reporting service endpoint. -message EventServiceConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.EventServiceConfig"; - - oneof config_source_specifier { - option (validate.required) = true; - - // Specifies the gRPC service that hosts the event reporting service. 
- GrpcService grpc_service = 1; - } -} diff --git a/api/envoy/config/core/v4alpha/extension.proto b/api/envoy/config/core/v4alpha/extension.proto deleted file mode 100644 index 4de107580d072..0000000000000 --- a/api/envoy/config/core/v4alpha/extension.proto +++ /dev/null @@ -1,68 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v4alpha; - -import "envoy/config/core/v4alpha/config_source.proto"; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; -option java_outer_classname = "ExtensionProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Extension configuration] - -// Message type for extension configuration. -// [#next-major-version: revisit all existing typed_config that doesn't use this wrapper.]. -message TypedExtensionConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.TypedExtensionConfig"; - - // The name of an extension. This is not used to select the extension, instead - // it serves the role of an opaque identifier. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // The typed config for the extension. The type URL will be used to identify - // the extension. In the case that the type URL is *udpa.type.v1.TypedStruct*, - // the inner type URL of *TypedStruct* will be utilized. See the - // :ref:`extension configuration overview - // ` for further details. - google.protobuf.Any typed_config = 2 [(validate.rules).any = {required: true}]; -} - -// Configuration source specifier for a late-bound extension configuration. The -// parent resource is warmed until all the initial extension configurations are -// received, unless the flag to apply the default configuration is set. 
-// Subsequent extension updates are atomic on a per-worker basis. Once an -// extension configuration is applied to a request or a connection, it remains -// constant for the duration of processing. If the initial delivery of the -// extension configuration fails, due to a timeout for example, the optional -// default configuration is applied. Without a default configuration, the -// extension is disabled, until an extension configuration is received. The -// behavior of a disabled extension depends on the context. For example, a -// filter chain with a disabled extension filter rejects all incoming streams. -message ExtensionConfigSource { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.ExtensionConfigSource"; - - ConfigSource config_source = 1 [(validate.rules).any = {required: true}]; - - // Optional default configuration to use as the initial configuration if - // there is a failure to receive the initial extension configuration or if - // `apply_default_config_without_warming` flag is set. - google.protobuf.Any default_config = 2; - - // Use the default config as the initial configuration without warming and - // waiting for the first discovery response. Requires the default configuration - // to be supplied. - bool apply_default_config_without_warming = 3; - - // A set of permitted extension type URLs. Extension configuration updates are rejected - // if they do not match any type URL in the set. 
- repeated string type_urls = 4 [(validate.rules).repeated = {min_items: 1}]; -} diff --git a/api/envoy/config/core/v4alpha/grpc_method_list.proto b/api/envoy/config/core/v4alpha/grpc_method_list.proto deleted file mode 100644 index 371ea32c10f3a..0000000000000 --- a/api/envoy/config/core/v4alpha/grpc_method_list.proto +++ /dev/null @@ -1,33 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v4alpha; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; -option java_outer_classname = "GrpcMethodListProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: gRPC method list] - -// A list of gRPC methods which can be used as an allowlist, for example. -message GrpcMethodList { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.GrpcMethodList"; - - message Service { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.GrpcMethodList.Service"; - - // The name of the gRPC service. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // The names of the gRPC methods in this service. 
- repeated string method_names = 2 [(validate.rules).repeated = {min_items: 1}]; - } - - repeated Service services = 1; -} diff --git a/api/envoy/config/core/v4alpha/grpc_service.proto b/api/envoy/config/core/v4alpha/grpc_service.proto deleted file mode 100644 index 973983386c2e8..0000000000000 --- a/api/envoy/config/core/v4alpha/grpc_service.proto +++ /dev/null @@ -1,302 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/sensitive.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; -option java_outer_classname = "GrpcServiceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: gRPC services] - -// gRPC service configuration. This is used by :ref:`ApiConfigSource -// ` and filter configurations. -// [#next-free-field: 6] -message GrpcService { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.GrpcService"; - - message EnvoyGrpc { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.GrpcService.EnvoyGrpc"; - - // The name of the upstream gRPC cluster. SSL credentials will be supplied - // in the :ref:`Cluster ` :ref:`transport_socket - // `. - string cluster_name = 1 [(validate.rules).string = {min_len: 1}]; - - // The `:authority` header in the grpc request. If this field is not set, the authority header value will be `cluster_name`. - // Note that this authority does not override the SNI. The SNI is provided by the transport socket of the cluster. 
- string authority = 2 - [(validate.rules).string = - {min_len: 0 max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}]; - } - - // [#next-free-field: 9] - message GoogleGrpc { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.GrpcService.GoogleGrpc"; - - // See https://grpc.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html. - message SslCredentials { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials"; - - // PEM encoded server root certificates. - DataSource root_certs = 1; - - // PEM encoded client private key. - DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; - - // PEM encoded client certificate chain. - DataSource cert_chain = 3; - } - - // Local channel credentials. Only UDS is supported for now. - // See https://github.com/grpc/grpc/pull/15909. - message GoogleLocalCredentials { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.GrpcService.GoogleGrpc.GoogleLocalCredentials"; - } - - // See https://grpc.io/docs/guides/auth.html#credential-types to understand Channel and Call - // credential types. 
- message ChannelCredentials { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials"; - - oneof credential_specifier { - option (validate.required) = true; - - SslCredentials ssl_credentials = 1; - - // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 - google.protobuf.Empty google_default = 2; - - GoogleLocalCredentials local_credentials = 3; - } - } - - // [#next-free-field: 8] - message CallCredentials { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials"; - - message ServiceAccountJWTAccessCredentials { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials." - "ServiceAccountJWTAccessCredentials"; - - string json_key = 1; - - uint64 token_lifetime_seconds = 2; - } - - message GoogleIAMCredentials { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.GoogleIAMCredentials"; - - string authorization_token = 1; - - string authority_selector = 2; - } - - message MetadataCredentialsFromPlugin { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials." - "MetadataCredentialsFromPlugin"; - - reserved 2; - - reserved "config"; - - string name = 1; - - // [#extension-category: envoy.grpc_credentials] - oneof config_type { - google.protobuf.Any typed_config = 3; - } - } - - // Security token service configuration that allows Google gRPC to - // fetch security token from an OAuth 2.0 authorization server. - // See https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 and - // https://github.com/grpc/grpc/pull/19587. 
- // [#next-free-field: 10] - message StsService { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.StsService"; - - // URI of the token exchange service that handles token exchange requests. - // [#comment:TODO(asraa): Add URI validation when implemented. Tracked by - // https://github.com/envoyproxy/protoc-gen-validate/issues/303] - string token_exchange_service_uri = 1; - - // Location of the target service or resource where the client - // intends to use the requested security token. - string resource = 2; - - // Logical name of the target service where the client intends to - // use the requested security token. - string audience = 3; - - // The desired scope of the requested security token in the - // context of the service or resource where the token will be used. - string scope = 4; - - // Type of the requested security token. - string requested_token_type = 5; - - // The path of subject token, a security token that represents the - // identity of the party on behalf of whom the request is being made. - string subject_token_path = 6 [(validate.rules).string = {min_len: 1}]; - - // Type of the subject token. - string subject_token_type = 7 [(validate.rules).string = {min_len: 1}]; - - // The path of actor token, a security token that represents the identity - // of the acting party. The acting party is authorized to use the - // requested security token and act on behalf of the subject. - string actor_token_path = 8; - - // Type of the actor token. - string actor_token_type = 9; - } - - oneof credential_specifier { - option (validate.required) = true; - - // Access token credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#ad3a80da696ffdaea943f0f858d7a360d. - string access_token = 1; - - // Google Compute Engine credentials. 
- // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 - google.protobuf.Empty google_compute_engine = 2; - - // Google refresh token credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#a96901c997b91bc6513b08491e0dca37c. - string google_refresh_token = 3; - - // Service Account JWT Access credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#a92a9f959d6102461f66ee973d8e9d3aa. - ServiceAccountJWTAccessCredentials service_account_jwt_access = 4; - - // Google IAM credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#a9fc1fc101b41e680d47028166e76f9d0. - GoogleIAMCredentials google_iam = 5; - - // Custom authenticator credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#a823c6a4b19ffc71fb33e90154ee2ad07. - // https://grpc.io/docs/guides/auth.html#extending-grpc-to-support-other-authentication-mechanisms. - MetadataCredentialsFromPlugin from_plugin = 6; - - // Custom security token service which implements OAuth 2.0 token exchange. - // https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 - // See https://github.com/grpc/grpc/pull/19587. - StsService sts_service = 7; - } - } - - // Channel arguments. - message ChannelArgs { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs"; - - message Value { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.Value"; - - // Pointer values are not supported, since they don't make any sense when - // delivered via the API. - oneof value_specifier { - option (validate.required) = true; - - string string_value = 1; - - int64 int_value = 2; - } - } - - // See grpc_types.h GRPC_ARG #defines for keys that work here. - map args = 1; - } - - // The target URI when using the `Google C++ gRPC client - // `_. SSL credentials will be supplied in - // :ref:`channel_credentials `. 
- string target_uri = 1 [(validate.rules).string = {min_len: 1}]; - - ChannelCredentials channel_credentials = 2; - - // A set of call credentials that can be composed with `channel credentials - // `_. - repeated CallCredentials call_credentials = 3; - - // The human readable prefix to use when emitting statistics for the gRPC - // service. - // - // .. csv-table:: - // :header: Name, Type, Description - // :widths: 1, 1, 2 - // - // streams_total, Counter, Total number of streams opened - // streams_closed_, Counter, Total streams closed with - string stat_prefix = 4 [(validate.rules).string = {min_len: 1}]; - - // The name of the Google gRPC credentials factory to use. This must have been registered with - // Envoy. If this is empty, a default credentials factory will be used that sets up channel - // credentials based on other configuration parameters. - string credentials_factory_name = 5; - - // Additional configuration for site-specific customizations of the Google - // gRPC library. - google.protobuf.Struct config = 6; - - // How many bytes each stream can buffer internally. - // If not set an implementation defined default is applied (1MiB). - google.protobuf.UInt32Value per_stream_buffer_limit_bytes = 7; - - // Custom channels args. - ChannelArgs channel_args = 8; - } - - reserved 4; - - oneof target_specifier { - option (validate.required) = true; - - // Envoy's in-built gRPC client. - // See the :ref:`gRPC services overview ` - // documentation for discussion on gRPC client selection. - EnvoyGrpc envoy_grpc = 1; - - // `Google C++ gRPC client `_ - // See the :ref:`gRPC services overview ` - // documentation for discussion on gRPC client selection. - GoogleGrpc google_grpc = 2; - } - - // The timeout for the gRPC request. This is the timeout for a specific - // request. - google.protobuf.Duration timeout = 3; - - // Additional metadata to include in streams initiated to the GrpcService. 
This can be used for - // scenarios in which additional ad hoc authorization headers (e.g. ``x-foo-bar: baz-key``) are to - // be injected. For more information, including details on header value syntax, see the - // documentation on :ref:`custom request headers - // `. - repeated HeaderValue initial_metadata = 5; -} diff --git a/api/envoy/config/core/v4alpha/health_check.proto b/api/envoy/config/core/v4alpha/health_check.proto deleted file mode 100644 index bf86f26e665e3..0000000000000 --- a/api/envoy/config/core/v4alpha/health_check.proto +++ /dev/null @@ -1,372 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/event_service_config.proto"; -import "envoy/type/matcher/v4alpha/string.proto"; -import "envoy/type/v3/http.proto"; -import "envoy/type/v3/range.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; -option java_outer_classname = "HealthCheckProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Health check] -// * Health checking :ref:`architecture overview `. -// * If health checking is configured for a cluster, additional statistics are emitted. They are -// documented :ref:`here `. - -// Endpoint health status. -enum HealthStatus { - // The health status is not known. This is interpreted by Envoy as *HEALTHY*. - UNKNOWN = 0; - - // Healthy. - HEALTHY = 1; - - // Unhealthy. - UNHEALTHY = 2; - - // Connection draining in progress. E.g., - // ``_ - // or - // ``_. - // This is interpreted by Envoy as *UNHEALTHY*. 
- DRAINING = 3; - - // Health check timed out. This is part of HDS and is interpreted by Envoy as - // *UNHEALTHY*. - TIMEOUT = 4; - - // Degraded. - DEGRADED = 5; -} - -// [#next-free-field: 25] -message HealthCheck { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HealthCheck"; - - // Describes the encoding of the payload bytes in the payload. - message Payload { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.HealthCheck.Payload"; - - oneof payload { - option (validate.required) = true; - - // Hex encoded payload. E.g., "000000FF". - string text = 1 [(validate.rules).string = {min_len: 1}]; - - // [#not-implemented-hide:] Binary payload. - bytes binary = 2; - } - } - - // [#next-free-field: 12] - message HttpHealthCheck { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.HealthCheck.HttpHealthCheck"; - - reserved 5, 7; - - reserved "service_name", "use_http2"; - - // The value of the host header in the HTTP health check request. If - // left empty (default value), the name of the cluster this health check is associated - // with will be used. The host header can be customized for a specific endpoint by setting the - // :ref:`hostname ` field. - string host = 1 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Specifies the HTTP path that will be requested during health checking. For example - // */healthcheck*. - string path = 2 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // [#not-implemented-hide:] HTTP specific payload. - Payload send = 3; - - // [#not-implemented-hide:] HTTP specific response. - Payload receive = 4; - - // Specifies a list of HTTP headers that should be added to each request that is sent to the - // health checked cluster. 
For more information, including details on header value syntax, see - // the documentation on :ref:`custom request headers - // `. - repeated HeaderValueOption request_headers_to_add = 6 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each request that is sent to the - // health checked cluster. - repeated string request_headers_to_remove = 8 [(validate.rules).repeated = { - items {string {well_known_regex: HTTP_HEADER_NAME strict: false}} - }]; - - // Specifies a list of HTTP response statuses considered healthy. If provided, replaces default - // 200-only policy - 200 must be included explicitly as needed. Ranges follow half-open - // semantics of :ref:`Int64Range `. The start and end of each - // range are required. Only statuses in the range [100, 600) are allowed. - repeated type.v3.Int64Range expected_statuses = 9; - - // Use specified application protocol for health checks. - type.v3.CodecClientType codec_client_type = 10 [(validate.rules).enum = {defined_only: true}]; - - // An optional service name parameter which is used to validate the identity of - // the health checked cluster using a :ref:`StringMatcher - // `. See the :ref:`architecture overview - // ` for more information. - type.matcher.v4alpha.StringMatcher service_name_matcher = 11; - } - - message TcpHealthCheck { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.HealthCheck.TcpHealthCheck"; - - // Empty payloads imply a connect-only health check. - Payload send = 1; - - // When checking the response, “fuzzy” matching is performed such that each - // binary block must be found, and in the order specified, but not - // necessarily contiguous. - repeated Payload receive = 2; - } - - message RedisHealthCheck { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.HealthCheck.RedisHealthCheck"; - - // If set, optionally perform ``EXISTS `` instead of ``PING``. 
A return value - // from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other - // than 0 is considered a failure. This allows the user to mark a Redis instance for maintenance - // by setting the specified key to any value and waiting for traffic to drain. - string key = 1; - } - - // `grpc.health.v1.Health - // `_-based - // healthcheck. See `gRPC doc `_ - // for details. - message GrpcHealthCheck { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.HealthCheck.GrpcHealthCheck"; - - // An optional service name parameter which will be sent to gRPC service in - // `grpc.health.v1.HealthCheckRequest - // `_. - // message. See `gRPC health-checking overview - // `_ for more information. - string service_name = 1; - - // The value of the :authority header in the gRPC health check request. If - // left empty (default value), the name of the cluster this health check is associated - // with will be used. The authority header can be customized for a specific endpoint by setting - // the :ref:`hostname ` field. - string authority = 2 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - } - - // Custom health check. - message CustomHealthCheck { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.HealthCheck.CustomHealthCheck"; - - reserved 2; - - reserved "config"; - - // The registered name of the custom health checker. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // A custom health checker specific configuration which depends on the custom health checker - // being instantiated. See :api:`envoy/config/health_checker` for reference. - // [#extension-category: envoy.health_checkers] - oneof config_type { - google.protobuf.Any typed_config = 3; - } - } - - // Health checks occur over the transport socket specified for the cluster. 
This implies that if a - // cluster is using a TLS-enabled transport socket, the health check will also occur over TLS. - // - // This allows overriding the cluster TLS settings, just for health check connections. - message TlsOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.HealthCheck.TlsOptions"; - - // Specifies the ALPN protocols for health check connections. This is useful if the - // corresponding upstream is using ALPN-based :ref:`FilterChainMatch - // ` along with different protocols for health checks - // versus data connections. If empty, no ALPN protocols will be set on health check connections. - repeated string alpn_protocols = 1; - } - - reserved 10; - - // The time to wait for a health check response. If the timeout is reached the - // health check attempt will be considered a failure. - google.protobuf.Duration timeout = 1 [(validate.rules).duration = { - required: true - gt {} - }]; - - // The interval between health checks. - google.protobuf.Duration interval = 2 [(validate.rules).duration = { - required: true - gt {} - }]; - - // An optional jitter amount in milliseconds. If specified, Envoy will start health - // checking after for a random time in ms between 0 and initial_jitter. This only - // applies to the first health check. - google.protobuf.Duration initial_jitter = 20; - - // An optional jitter amount in milliseconds. If specified, during every - // interval Envoy will add interval_jitter to the wait time. - google.protobuf.Duration interval_jitter = 3; - - // An optional jitter amount as a percentage of interval_ms. If specified, - // during every interval Envoy will add interval_ms * - // interval_jitter_percent / 100 to the wait time. - // - // If interval_jitter_ms and interval_jitter_percent are both set, both of - // them will be used to increase the wait time. 
- uint32 interval_jitter_percent = 18; - - // The number of unhealthy health checks required before a host is marked - // unhealthy. Note that for *http* health checking if a host responds with 503 - // this threshold is ignored and the host is considered unhealthy immediately. - google.protobuf.UInt32Value unhealthy_threshold = 4 [(validate.rules).message = {required: true}]; - - // The number of healthy health checks required before a host is marked - // healthy. Note that during startup, only a single successful health check is - // required to mark a host healthy. - google.protobuf.UInt32Value healthy_threshold = 5 [(validate.rules).message = {required: true}]; - - // [#not-implemented-hide:] Non-serving port for health checking. - google.protobuf.UInt32Value alt_port = 6; - - // Reuse health check connection between health checks. Default is true. - google.protobuf.BoolValue reuse_connection = 7; - - oneof health_checker { - option (validate.required) = true; - - // HTTP health check. - HttpHealthCheck http_health_check = 8; - - // TCP health check. - TcpHealthCheck tcp_health_check = 9; - - // gRPC health check. - GrpcHealthCheck grpc_health_check = 11; - - // Custom health check. - CustomHealthCheck custom_health_check = 13; - } - - // The "no traffic interval" is a special health check interval that is used when a cluster has - // never had traffic routed to it. This lower interval allows cluster information to be kept up to - // date, without sending a potentially large amount of active health checking traffic for no - // reason. Once a cluster has been used for traffic routing, Envoy will shift back to using the - // standard health check interval that is defined. Note that this interval takes precedence over - // any other. - // - // The default value for "no traffic interval" is 60 seconds. 
- google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration = {gt {}}]; - - // The "no traffic healthy interval" is a special health check interval that - // is used for hosts that are currently passing active health checking - // (including new hosts) when the cluster has received no traffic. - // - // This is useful for when we want to send frequent health checks with - // `no_traffic_interval` but then revert to lower frequency `no_traffic_healthy_interval` once - // a host in the cluster is marked as healthy. - // - // Once a cluster has been used for traffic routing, Envoy will shift back to using the - // standard health check interval that is defined. - // - // If no_traffic_healthy_interval is not set, it will default to the - // no traffic interval and send that interval regardless of health state. - google.protobuf.Duration no_traffic_healthy_interval = 24 [(validate.rules).duration = {gt {}}]; - - // The "unhealthy interval" is a health check interval that is used for hosts that are marked as - // unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the - // standard health check interval that is defined. - // - // The default value for "unhealthy interval" is the same as "interval". - google.protobuf.Duration unhealthy_interval = 14 [(validate.rules).duration = {gt {}}]; - - // The "unhealthy edge interval" is a special health check interval that is used for the first - // health check right after a host is marked as unhealthy. For subsequent health checks - // Envoy will shift back to using either "unhealthy interval" if present or the standard health - // check interval that is defined. - // - // The default value for "unhealthy edge interval" is the same as "unhealthy interval". 
- google.protobuf.Duration unhealthy_edge_interval = 15 [(validate.rules).duration = {gt {}}]; - - // The "healthy edge interval" is a special health check interval that is used for the first - // health check right after a host is marked as healthy. For subsequent health checks - // Envoy will shift back to using the standard health check interval that is defined. - // - // The default value for "healthy edge interval" is the same as the default interval. - google.protobuf.Duration healthy_edge_interval = 16 [(validate.rules).duration = {gt {}}]; - - // Specifies the path to the :ref:`health check event log `. - // If empty, no event log will be written. - string event_log_path = 17; - - // [#not-implemented-hide:] - // The gRPC service for the health check event service. - // If empty, health check events won't be sent to a remote endpoint. - EventServiceConfig event_service = 22; - - // If set to true, health check failure events will always be logged. If set to false, only the - // initial health check failure event will be logged. - // The default value is false. - bool always_log_health_check_failures = 19; - - // This allows overriding the cluster TLS settings, just for health check connections. - TlsOptions tls_options = 21; - - // Optional key/value pairs that will be used to match a transport socket from those specified in the cluster's - // :ref:`tranport socket matches `. - // For example, the following match criteria - // - // .. code-block:: yaml - // - // transport_socket_match_criteria: - // useMTLS: true - // - // Will match the following :ref:`cluster socket match ` - // - // .. code-block:: yaml - // - // transport_socket_matches: - // - name: "useMTLS" - // match: - // useMTLS: true - // transport_socket: - // name: envoy.transport_sockets.tls - // config: { ... } # tls socket configuration - // - // If this field is set, then for health checks it will supersede an entry of *envoy.transport_socket* in the - // :ref:`LbEndpoint.Metadata `. 
- // This allows using different transport socket capabilities for health checking versus proxying to the - // endpoint. - // - // If the key/values pairs specified do not match any - // :ref:`transport socket matches `, - // the cluster's :ref:`transport socket ` - // will be used for health check socket configuration. - google.protobuf.Struct transport_socket_match_criteria = 23; -} diff --git a/api/envoy/config/core/v4alpha/http_uri.proto b/api/envoy/config/core/v4alpha/http_uri.proto deleted file mode 100644 index ae1c0c9a3d4eb..0000000000000 --- a/api/envoy/config/core/v4alpha/http_uri.proto +++ /dev/null @@ -1,56 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v4alpha; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; -option java_outer_classname = "HttpUriProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: HTTP Service URI ] - -// Envoy external URI descriptor -message HttpUri { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HttpUri"; - - // The HTTP server URI. It should be a full FQDN with protocol, host and path. - // - // Example: - // - // .. code-block:: yaml - // - // uri: https://www.googleapis.com/oauth2/v1/certs - // - string uri = 1 [(validate.rules).string = {min_len: 1}]; - - // Specify how `uri` is to be fetched. Today, this requires an explicit - // cluster, but in the future we may support dynamic cluster creation or - // inline DNS resolution. See `issue - // `_. - oneof http_upstream_type { - option (validate.required) = true; - - // A cluster is created in the Envoy "cluster_manager" config - // section. This field specifies the cluster name. - // - // Example: - // - // .. 
code-block:: yaml - // - // cluster: jwks_cluster - // - string cluster = 2 [(validate.rules).string = {min_len: 1}]; - } - - // Sets the maximum duration in milliseconds that a response can take to arrive upon request. - google.protobuf.Duration timeout = 3 [(validate.rules).duration = { - required: true - gte {} - }]; -} diff --git a/api/envoy/config/core/v4alpha/protocol.proto b/api/envoy/config/core/v4alpha/protocol.proto deleted file mode 100644 index 37e5af0c72bd1..0000000000000 --- a/api/envoy/config/core/v4alpha/protocol.proto +++ /dev/null @@ -1,497 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v4alpha; - -import "envoy/config/core/v4alpha/extension.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; -option java_outer_classname = "ProtocolProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Protocol options] - -// [#not-implemented-hide:] -message TcpProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.TcpProtocolOptions"; -} - -// QUIC protocol options which apply to both downstream and upstream connections. -message QuicProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.QuicProtocolOptions"; - - // Maximum number of streams that the client can negotiate per connection. 100 - // if not specified. - google.protobuf.UInt32Value max_concurrent_streams = 1; - - // `Initial stream-level flow-control receive window - // `_ size. Valid values range from - // 1 to 16777216 (2^24, maximum supported by QUICHE) and defaults to 65536 (2^16). 
- // - // NOTE: 16384 (2^14) is the minimum window size supported in Google QUIC. If configured smaller than it, we will use 16384 instead. - // QUICHE IETF Quic implementation supports 1 bytes window. We only support increasing the default window size now, so it's also the minimum. - // - // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the - // QUIC stream send and receive buffers. Once the buffer reaches this pointer, watermark callbacks will fire to - // stop the flow of data to the stream buffers. - google.protobuf.UInt32Value initial_stream_window_size = 2 - [(validate.rules).uint32 = {lte: 16777216 gte: 1}]; - - // Similar to *initial_stream_window_size*, but for connection-level - // flow-control. Valid values rage from 1 to 25165824 (24MB, maximum supported by QUICHE) and defaults to 65536 (2^16). - // window. Currently, this has the same minimum/default as *initial_stream_window_size*. - // - // NOTE: 16384 (2^14) is the minimum window size supported in Google QUIC. We only support increasing the default - // window size now, so it's also the minimum. - google.protobuf.UInt32Value initial_connection_window_size = 3 - [(validate.rules).uint32 = {lte: 25165824 gte: 1}]; -} - -message UpstreamHttpProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.UpstreamHttpProtocolOptions"; - - // Set transport socket `SNI `_ for new - // upstream connections based on the downstream HTTP host/authority header, as seen by the - // :ref:`router filter `. - bool auto_sni = 1; - - // Automatic validate upstream presented certificate for new upstream connections based on the - // downstream HTTP host/authority header, as seen by the - // :ref:`router filter `. - // This field is intended to set with `auto_sni` field. 
- bool auto_san_validation = 2; -} - -// Configures the alternate protocols cache which tracks alternate protocols that can be used to -// make an HTTP connection to an origin server. See https://tools.ietf.org/html/rfc7838 for -// HTTP Alternate Services and https://datatracker.ietf.org/doc/html/draft-ietf-dnsop-svcb-https-04 -// for the "HTTPS" DNS resource record. -message AlternateProtocolsCacheOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.AlternateProtocolsCacheOptions"; - - // The name of the cache. Multiple named caches allow independent alternate protocols cache - // configurations to operate within a single Envoy process using different configurations. All - // alternate protocols cache options with the same name *must* be equal in all fields when - // referenced from different configuration components. Configuration will fail to load if this is - // not the case. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // The maximum number of entries that the cache will hold. If not specified defaults to 1024. - // - // .. note: - // - // The implementation is approximate and enforced independently on each worker thread, thus - // it is possible for the maximum entries in the cache to go slightly above the configured - // value depending on timing. This is similar to how other circuit breakers work. - google.protobuf.UInt32Value max_entries = 2 [(validate.rules).uint32 = {gt: 0}]; -} - -// [#next-free-field: 7] -message HttpProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.HttpProtocolOptions"; - - // Action to take when Envoy receives client request with header names containing underscore - // characters. - // Underscore character is allowed in header names by the RFC-7230 and this behavior is implemented - // as a security measure due to systems that treat '_' and '-' as interchangeable. 
Envoy by default allows client request headers with underscore - // characters. - enum HeadersWithUnderscoresAction { - // Allow headers with underscores. This is the default behavior. - ALLOW = 0; - - // Reject client request. HTTP/1 requests are rejected with the 400 status. HTTP/2 requests - // end with the stream reset. The "httpN.requests_rejected_with_underscores_in_headers" counter - // is incremented for each rejected request. - REJECT_REQUEST = 1; - - // Drop the header with name containing underscores. The header is dropped before the filter chain is - // invoked and as such filters will not see dropped headers. The - // "httpN.dropped_headers_with_underscores" is incremented for each dropped header. - DROP_HEADER = 2; - } - - // The idle timeout for connections. The idle timeout is defined as the - // period in which there are no active requests. When the - // idle timeout is reached the connection will be closed. If the connection is an HTTP/2 - // downstream connection a drain sequence will occur prior to closing the connection, see - // :ref:`drain_timeout - // `. - // Note that request based timeouts mean that HTTP/2 PINGs will not keep the connection alive. - // If not specified, this defaults to 1 hour. To disable idle timeouts explicitly set this to 0. - // - // .. warning:: - // Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP - // FIN packets, etc. - // - // If the :ref:`overload action ` "envoy.overload_actions.reduce_timeouts" - // is configured, this timeout is scaled for downstream connections according to the value for - // :ref:`HTTP_DOWNSTREAM_CONNECTION_IDLE `. - google.protobuf.Duration idle_timeout = 1; - - // The maximum duration of a connection. The duration is defined as a period since a connection - // was established. If not set, there is no max duration. When max_connection_duration is reached - // the connection will be closed. 
Drain sequence will occur prior to closing the connection if - // if's applicable. See :ref:`drain_timeout - // `. - // Note: not implemented for upstream connections. - google.protobuf.Duration max_connection_duration = 3; - - // The maximum number of headers. If unconfigured, the default - // maximum number of request headers allowed is 100. Requests that exceed this limit will receive - // a 431 response for HTTP/1.x and cause a stream reset for HTTP/2. - google.protobuf.UInt32Value max_headers_count = 2 [(validate.rules).uint32 = {gte: 1}]; - - // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be - // reset independent of any other timeouts. If not specified, this value is not set. - google.protobuf.Duration max_stream_duration = 4; - - // Action to take when a client request with a header name containing underscore characters is received. - // If this setting is not specified, the value defaults to ALLOW. - // Note: upstream responses are not affected by this setting. - HeadersWithUnderscoresAction headers_with_underscores_action = 5; - - // Optional maximum requests for both upstream and downstream connections. - // If not specified, there is no limit. - // Setting this parameter to 1 will effectively disable keep alive. - // For HTTP/2 and HTTP/3, due to concurrent stream processing, the limit is approximate. 
- google.protobuf.UInt32Value max_requests_per_connection = 6; -} - -// [#next-free-field: 8] -message Http1ProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.Http1ProtocolOptions"; - - // [#next-free-field: 9] - message HeaderKeyFormat { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat"; - - message ProperCaseWords { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.ProperCaseWords"; - } - - oneof header_format { - option (validate.required) = true; - - // Formats the header by proper casing words: the first character and any character following - // a special character will be capitalized if it's an alpha character. For example, - // "content-type" becomes "Content-Type", and "foo$b#$are" becomes "Foo$B#$Are". - // Note that while this results in most headers following conventional casing, certain headers - // are not covered. For example, the "TE" header will be formatted as "Te". - ProperCaseWords proper_case_words = 1; - - // Configuration for stateful formatter extensions that allow using received headers to - // affect the output of encoding headers. E.g., preserving case during proxying. - // [#extension-category: envoy.http.stateful_header_formatters] - TypedExtensionConfig stateful_formatter = 8; - } - } - - // Handle HTTP requests with absolute URLs in the requests. These requests - // are generally sent by clients to forward/explicit proxies. This allows clients to configure - // envoy as their HTTP proxy. In Unix, for example, this is typically done by setting the - // *http_proxy* environment variable. - google.protobuf.BoolValue allow_absolute_url = 1; - - // Handle incoming HTTP/1.0 and HTTP 0.9 requests. - // This is off by default, and not fully standards compliant. 
There is support for pre-HTTP/1.1 - // style connect logic, dechunking, and handling lack of client host iff - // *default_host_for_http_10* is configured. - bool accept_http_10 = 2; - - // A default host for HTTP/1.0 requests. This is highly suggested if *accept_http_10* is true as - // Envoy does not otherwise support HTTP/1.0 without a Host header. - // This is a no-op if *accept_http_10* is not true. - string default_host_for_http_10 = 3; - - // Describes how the keys for response headers should be formatted. By default, all header keys - // are lower cased. - HeaderKeyFormat header_key_format = 4; - - // Enables trailers for HTTP/1. By default the HTTP/1 codec drops proxied trailers. - // - // .. attention:: - // - // Note that this only happens when Envoy is chunk encoding which occurs when: - // - The request is HTTP/1.1. - // - Is neither a HEAD only request nor a HTTP Upgrade. - // - Not a response to a HEAD request. - // - The content length header is not present. - bool enable_trailers = 5; - - // Allows Envoy to process requests/responses with both `Content-Length` and `Transfer-Encoding` - // headers set. By default such messages are rejected, but if option is enabled - Envoy will - // remove Content-Length header and process message. - // See `RFC7230, sec. 3.3.3 ` for details. - // - // .. attention:: - // Enabling this option might lead to request smuggling vulnerability, especially if traffic - // is proxied via multiple layers of proxies. - bool allow_chunked_length = 6; - - // Allows invalid HTTP messaging. When this option is false, then Envoy will terminate - // HTTP/1.1 connections upon receiving an invalid HTTP message. However, - // when this option is true, then Envoy will leave the HTTP/1.1 connection - // open where possible. - // If set, this overrides any HCM :ref:`stream_error_on_invalid_http_messaging - // `. 
- google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 7; -} - -message KeepaliveSettings { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.KeepaliveSettings"; - - // Send HTTP/2 PING frames at this period, in order to test that the connection is still alive. - // If this is zero, interval PINGs will not be sent. - google.protobuf.Duration interval = 1 [(validate.rules).duration = {gte {nanos: 1000000}}]; - - // How long to wait for a response to a keepalive PING. If a response is not received within this - // time period, the connection will be aborted. - google.protobuf.Duration timeout = 2 [(validate.rules).duration = { - required: true - gte {nanos: 1000000} - }]; - - // A random jitter amount as a percentage of interval that will be added to each interval. - // A value of zero means there will be no jitter. - // The default value is 15%. - type.v3.Percent interval_jitter = 3; - - // If the connection has been idle for this duration, send a HTTP/2 ping ahead - // of new stream creation, to quickly detect dead connections. - // If this is zero, this type of PING will not be sent. - // If an interval ping is outstanding, a second ping will not be sent as the - // interval ping will determine if the connection is dead. - google.protobuf.Duration connection_idle_interval = 4 - [(validate.rules).duration = {gte {nanos: 1000000}}]; -} - -// [#next-free-field: 16] -message Http2ProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.Http2ProtocolOptions"; - - // Defines a parameter to be sent in the SETTINGS frame. - // See `RFC7540, sec. 6.5.1 `_ for details. - message SettingsParameter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter"; - - // The 16 bit parameter identifier. 
- google.protobuf.UInt32Value identifier = 1 [ - (validate.rules).uint32 = {lte: 65535 gte: 0}, - (validate.rules).message = {required: true} - ]; - - // The 32 bit parameter value. - google.protobuf.UInt32Value value = 2 [(validate.rules).message = {required: true}]; - } - - reserved 12; - - reserved "stream_error_on_invalid_http_messaging"; - - // `Maximum table size `_ - // (in octets) that the encoder is permitted to use for the dynamic HPACK table. Valid values - // range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. 0 effectively disables header - // compression. - google.protobuf.UInt32Value hpack_table_size = 1; - - // `Maximum concurrent streams `_ - // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1) - // and defaults to 2147483647. - // - // For upstream connections, this also limits how many streams Envoy will initiate concurrently - // on a single connection. If the limit is reached, Envoy may queue requests or establish - // additional connections (as allowed per circuit breaker limits). - // - // This acts as an upper bound: Envoy will lower the max concurrent streams allowed on a given - // connection based on upstream settings. Config dumps will reflect the configured upper bound, - // not the per-connection negotiated limits. - google.protobuf.UInt32Value max_concurrent_streams = 2 - [(validate.rules).uint32 = {lte: 2147483647 gte: 1}]; - - // `Initial stream-level flow-control window - // `_ size. Valid values range from 65535 - // (2^16 - 1, HTTP/2 default) to 2147483647 (2^31 - 1, HTTP/2 maximum) and defaults to 268435456 - // (256 * 1024 * 1024). - // - // NOTE: 65535 is the initial window size from HTTP/2 spec. We only support increasing the default - // window size now, so it's also the minimum. - // - // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the - // HTTP/2 codec buffers. 
Once the buffer reaches this pointer, watermark callbacks will fire to - // stop the flow of data to the codec buffers. - google.protobuf.UInt32Value initial_stream_window_size = 3 - [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}]; - - // Similar to *initial_stream_window_size*, but for connection-level flow-control - // window. Currently, this has the same minimum/maximum/default as *initial_stream_window_size*. - google.protobuf.UInt32Value initial_connection_window_size = 4 - [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}]; - - // Allows proxying Websocket and other upgrades over H2 connect. - bool allow_connect = 5; - - // [#not-implemented-hide:] Hiding until envoy has full metadata support. - // Still under implementation. DO NOT USE. - // - // Allows metadata. See [metadata - // docs](https://github.com/envoyproxy/envoy/blob/main/source/docs/h2_metadata.md) for more - // information. - bool allow_metadata = 6; - - // Limit the number of pending outbound downstream frames of all types (frames that are waiting to - // be written into the socket). Exceeding this limit triggers flood mitigation and connection is - // terminated. The ``http2.outbound_flood`` stat tracks the number of terminated connections due - // to flood mitigation. The default limit is 10000. - // NOTE: flood and abuse mitigation for upstream connections is presently enabled by the - // `envoy.reloadable_features.upstream_http2_flood_checks` flag. - google.protobuf.UInt32Value max_outbound_frames = 7 [(validate.rules).uint32 = {gte: 1}]; - - // Limit the number of pending outbound downstream frames of types PING, SETTINGS and RST_STREAM, - // preventing high memory utilization when receiving continuous stream of these frames. Exceeding - // this limit triggers flood mitigation and connection is terminated. The - // ``http2.outbound_control_flood`` stat tracks the number of terminated connections due to flood - // mitigation. The default limit is 1000. 
- // NOTE: flood and abuse mitigation for upstream connections is presently enabled by the - // `envoy.reloadable_features.upstream_http2_flood_checks` flag. - google.protobuf.UInt32Value max_outbound_control_frames = 8 [(validate.rules).uint32 = {gte: 1}]; - - // Limit the number of consecutive inbound frames of types HEADERS, CONTINUATION and DATA with an - // empty payload and no end stream flag. Those frames have no legitimate use and are abusive, but - // might be a result of a broken HTTP/2 implementation. The `http2.inbound_empty_frames_flood`` - // stat tracks the number of connections terminated due to flood mitigation. - // Setting this to 0 will terminate connection upon receiving first frame with an empty payload - // and no end stream flag. The default limit is 1. - // NOTE: flood and abuse mitigation for upstream connections is presently enabled by the - // `envoy.reloadable_features.upstream_http2_flood_checks` flag. - google.protobuf.UInt32Value max_consecutive_inbound_frames_with_empty_payload = 9; - - // Limit the number of inbound PRIORITY frames allowed per each opened stream. If the number - // of PRIORITY frames received over the lifetime of connection exceeds the value calculated - // using this formula:: - // - // max_inbound_priority_frames_per_stream * (1 + opened_streams) - // - // the connection is terminated. For downstream connections the `opened_streams` is incremented when - // Envoy receives complete response headers from the upstream server. For upstream connection the - // `opened_streams` is incremented when Envoy send the HEADERS frame for a new stream. The - // ``http2.inbound_priority_frames_flood`` stat tracks - // the number of connections terminated due to flood mitigation. The default limit is 100. - // NOTE: flood and abuse mitigation for upstream connections is presently enabled by the - // `envoy.reloadable_features.upstream_http2_flood_checks` flag. 
- google.protobuf.UInt32Value max_inbound_priority_frames_per_stream = 10; - - // Limit the number of inbound WINDOW_UPDATE frames allowed per DATA frame sent. If the number - // of WINDOW_UPDATE frames received over the lifetime of connection exceeds the value calculated - // using this formula:: - // - // 5 + 2 * (opened_streams + - // max_inbound_window_update_frames_per_data_frame_sent * outbound_data_frames) - // - // the connection is terminated. For downstream connections the `opened_streams` is incremented when - // Envoy receives complete response headers from the upstream server. For upstream connections the - // `opened_streams` is incremented when Envoy sends the HEADERS frame for a new stream. The - // ``http2.inbound_priority_frames_flood`` stat tracks the number of connections terminated due to - // flood mitigation. The default max_inbound_window_update_frames_per_data_frame_sent value is 10. - // Setting this to 1 should be enough to support HTTP/2 implementations with basic flow control, - // but more complex implementations that try to estimate available bandwidth require at least 2. - // NOTE: flood and abuse mitigation for upstream connections is presently enabled by the - // `envoy.reloadable_features.upstream_http2_flood_checks` flag. - google.protobuf.UInt32Value max_inbound_window_update_frames_per_data_frame_sent = 11 - [(validate.rules).uint32 = {gte: 1}]; - - // Allows invalid HTTP messaging and headers. When this option is disabled (default), then - // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, - // when this option is enabled, only the offending stream is terminated. - // - // This overrides any HCM :ref:`stream_error_on_invalid_http_messaging - // ` - // - // See `RFC7540, sec. 8.1 `_ for details. 
- google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 14; - - // [#not-implemented-hide:] - // Specifies SETTINGS frame parameters to be sent to the peer, with two exceptions: - // - // 1. SETTINGS_ENABLE_PUSH (0x2) is not configurable as HTTP/2 server push is not supported by - // Envoy. - // - // 2. SETTINGS_ENABLE_CONNECT_PROTOCOL (0x8) is only configurable through the named field - // 'allow_connect'. - // - // Note that custom parameters specified through this field can not also be set in the - // corresponding named parameters: - // - // .. code-block:: text - // - // ID Field Name - // ---------------- - // 0x1 hpack_table_size - // 0x3 max_concurrent_streams - // 0x4 initial_stream_window_size - // - // Collisions will trigger config validation failure on load/update. Likewise, inconsistencies - // between custom parameters with the same identifier will trigger a failure. - // - // See `IANA HTTP/2 Settings - // `_ for - // standardized identifiers. - repeated SettingsParameter custom_settings_parameters = 13; - - // Send HTTP/2 PING frames to verify that the connection is still healthy. If the remote peer - // does not respond within the configured timeout, the connection will be aborted. - KeepaliveSettings connection_keepalive = 15; -} - -// [#not-implemented-hide:] -message GrpcProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.GrpcProtocolOptions"; - - Http2ProtocolOptions http2_protocol_options = 1; -} - -// A message which allows using HTTP/3. -message Http3ProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.Http3ProtocolOptions"; - - QuicProtocolOptions quic_protocol_options = 1; - - // Allows invalid HTTP messaging and headers. When this option is disabled (default), then - // the whole HTTP/3 connection is terminated upon receiving invalid HEADERS frame. 
However, - // when this option is enabled, only the offending stream is terminated. - // - // If set, this overrides any HCM :ref:`stream_error_on_invalid_http_messaging - // `. - google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 2; -} - -// A message to control transformations to the :scheme header -message SchemeHeaderTransformation { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.SchemeHeaderTransformation"; - - oneof transformation { - // Overwrite any Scheme header with the contents of this string. - string scheme_to_overwrite = 1 [(validate.rules).string = {in: "http" in: "https"}]; - } -} diff --git a/api/envoy/config/core/v4alpha/proxy_protocol.proto b/api/envoy/config/core/v4alpha/proxy_protocol.proto deleted file mode 100644 index 1650f29d8cab6..0000000000000 --- a/api/envoy/config/core/v4alpha/proxy_protocol.proto +++ /dev/null @@ -1,29 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v4alpha; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; -option java_outer_classname = "ProxyProtocolProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Proxy Protocol] - -message ProxyProtocolConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.ProxyProtocolConfig"; - - enum Version { - // PROXY protocol version 1. Human readable format. - V1 = 0; - - // PROXY protocol version 2. Binary format. - V2 = 1; - } - - // The PROXY protocol version to use. 
See https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt for details - Version version = 1; -} diff --git a/api/envoy/config/core/v4alpha/resolver.proto b/api/envoy/config/core/v4alpha/resolver.proto deleted file mode 100644 index 4849a54161ced..0000000000000 --- a/api/envoy/config/core/v4alpha/resolver.proto +++ /dev/null @@ -1,48 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v4alpha; - -import "envoy/config/core/v4alpha/address.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; -option java_outer_classname = "ResolverProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Resolver] - -// Configuration of DNS resolver option flags which control the behavior of the DNS resolver. -message DnsResolverOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.DnsResolverOptions"; - - // Use TCP for all DNS queries instead of the default protocol UDP. - // Setting this value causes failure if the - // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during - // server startup. Apple's API only uses UDP for DNS resolution. - bool use_tcp_for_dns_lookups = 1; - - // Do not use the default search domains; only query hostnames as-is or as aliases. - bool no_default_search_domain = 2; -} - -// DNS resolution configuration which includes the underlying dns resolver addresses and options. -message DnsResolutionConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.DnsResolutionConfig"; - - // A list of dns resolver addresses. If specified, the DNS client library will perform resolution - // via the underlying DNS resolvers. 
Otherwise, the default system resolvers - // (e.g., /etc/resolv.conf) will be used. - // Setting this value causes failure if the - // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during - // server startup. Apple's API only allows overriding DNS resolvers via system settings. - repeated Address resolvers = 1 [(validate.rules).repeated = {min_items: 1}]; - - // Configuration of DNS resolver option flags which control the behavior of the DNS resolver. - DnsResolverOptions dns_resolver_options = 2; -} diff --git a/api/envoy/config/core/v4alpha/socket_option.proto b/api/envoy/config/core/v4alpha/socket_option.proto deleted file mode 100644 index 7dac394a865dc..0000000000000 --- a/api/envoy/config/core/v4alpha/socket_option.proto +++ /dev/null @@ -1,56 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v4alpha; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; -option java_outer_classname = "SocketOptionProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Socket Option ] - -// Generic socket option message. This would be used to set socket options that -// might not exist in upstream kernels or precompiled Envoy binaries. -// [#next-free-field: 7] -message SocketOption { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.SocketOption"; - - enum SocketState { - // Socket options are applied after socket creation but before binding the socket to a port - STATE_PREBIND = 0; - - // Socket options are applied after binding the socket to a port but before calling listen() - STATE_BOUND = 1; - - // Socket options are applied after calling listen() - STATE_LISTENING = 2; - } - - // An optional name to give this socket option for debugging, etc. 
- // Uniqueness is not required and no special meaning is assumed. - string description = 1; - - // Corresponding to the level value passed to setsockopt, such as IPPROTO_TCP - int64 level = 2; - - // The numeric name as passed to setsockopt - int64 name = 3; - - oneof value { - option (validate.required) = true; - - // Because many sockopts take an int value. - int64 int_value = 4; - - // Otherwise it's a byte buffer. - bytes buf_value = 5; - } - - // The state in which the option will be applied. When used in BindConfig - // STATE_PREBIND is currently the only valid value. - SocketState state = 6 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/api/envoy/config/core/v4alpha/substitution_format_string.proto b/api/envoy/config/core/v4alpha/substitution_format_string.proto deleted file mode 100644 index 6f5037f5f1770..0000000000000 --- a/api/envoy/config/core/v4alpha/substitution_format_string.proto +++ /dev/null @@ -1,101 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/extension.proto"; - -import "google/protobuf/struct.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; -option java_outer_classname = "SubstitutionFormatStringProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Substitution format string] - -// Configuration to use multiple :ref:`command operators ` -// to generate a new string in either plain text or JSON format. 
-// [#next-free-field: 7] -message SubstitutionFormatString { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.SubstitutionFormatString"; - - reserved 1; - - reserved "text_format"; - - oneof format { - option (validate.required) = true; - - // Specify a format with command operators to form a JSON string. - // Its details is described in :ref:`format dictionary`. - // Values are rendered as strings, numbers, or boolean values as appropriate. - // Nested JSON objects may be produced by some command operators (e.g. FILTER_STATE or DYNAMIC_METADATA). - // See the documentation for a specific command operator for details. - // - // .. validated-code-block:: yaml - // :type-name: envoy.config.core.v3.SubstitutionFormatString - // - // json_format: - // status: "%RESPONSE_CODE%" - // message: "%LOCAL_REPLY_BODY%" - // - // The following JSON object would be created: - // - // .. code-block:: json - // - // { - // "status": 500, - // "message": "My error message" - // } - // - google.protobuf.Struct json_format = 2 [(validate.rules).message = {required: true}]; - - // Specify a format with command operators to form a text string. - // Its details is described in :ref:`format string`. - // - // For example, setting ``text_format`` like below, - // - // .. validated-code-block:: yaml - // :type-name: envoy.config.core.v3.SubstitutionFormatString - // - // text_format_source: - // inline_string: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" - // - // generates plain text similar to: - // - // .. code-block:: text - // - // upstream connect error:503:path=/foo - // - DataSource text_format_source = 5; - } - - // If set to true, when command operators are evaluated to null, - // - // * for ``text_format``, the output of the empty operator is changed from ``-`` to an - // empty string, so that empty values are omitted entirely. - // * for ``json_format`` the keys with null values are omitted in the output structure. 
- bool omit_empty_values = 3; - - // Specify a *content_type* field. - // If this field is not set then ``text/plain`` is used for *text_format* and - // ``application/json`` is used for *json_format*. - // - // .. validated-code-block:: yaml - // :type-name: envoy.config.core.v3.SubstitutionFormatString - // - // content_type: "text/html; charset=UTF-8" - // - string content_type = 4; - - // Specifies a collection of Formatter plugins that can be called from the access log configuration. - // See the formatters extensions documentation for details. - // [#extension-category: envoy.formatter] - repeated TypedExtensionConfig formatters = 6; -} diff --git a/api/envoy/config/core/v4alpha/udp_socket_config.proto b/api/envoy/config/core/v4alpha/udp_socket_config.proto deleted file mode 100644 index 5fa6c6ec52dd1..0000000000000 --- a/api/envoy/config/core/v4alpha/udp_socket_config.proto +++ /dev/null @@ -1,35 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v4alpha; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; -option java_outer_classname = "UdpSocketConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: UDP socket config] - -// Generic UDP socket configuration. -message UdpSocketConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.UdpSocketConfig"; - - // The maximum size of received UDP datagrams. Using a larger size will cause Envoy to allocate - // more memory per socket. Received datagrams above this size will be dropped. If not set - // defaults to 1500 bytes. 
- google.protobuf.UInt64Value max_rx_datagram_size = 1 - [(validate.rules).uint64 = {lt: 65536 gt: 0}]; - - // Configures whether Generic Receive Offload (GRO) - // _ is preferred when reading from the - // UDP socket. The default is context dependent and is documented where UdpSocketConfig is used. - // This option affects performance but not functionality. If GRO is not supported by the operating - // system, non-GRO receive will be used. - google.protobuf.BoolValue prefer_gro = 2; -} diff --git a/api/envoy/config/endpoint/v3/endpoint_components.proto b/api/envoy/config/endpoint/v3/endpoint_components.proto index 0e10ac3b2fca7..0a9aac105e72d 100644 --- a/api/envoy/config/endpoint/v3/endpoint_components.proto +++ b/api/envoy/config/endpoint/v3/endpoint_components.proto @@ -4,10 +4,12 @@ package envoy.config.endpoint.v3; import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/health_check.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -108,21 +110,51 @@ message LbEndpoint { google.protobuf.UInt32Value load_balancing_weight = 4 [(validate.rules).uint32 = {gte: 1}]; } +// [#not-implemented-hide:] +// A configuration for a LEDS collection. +message LedsClusterLocalityConfig { + // Configuration for the source of LEDS updates for a Locality. + core.v3.ConfigSource leds_config = 1; + + // The xDS transport protocol glob collection resource name. + // The service is only supported in delta xDS (incremental) mode. + string leds_collection_name = 2; +} + // A group of endpoints belonging to a Locality. // One can have multiple LocalityLbEndpoints for a locality, but this is // generally only done if the different groups need to have different load // balancing weights or different priorities. 
-// [#next-free-field: 7] +// [#next-free-field: 9] message LocalityLbEndpoints { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.endpoint.LocalityLbEndpoints"; + // [#not-implemented-hide:] + // A list of endpoints of a specific locality. + message LbEndpointList { + repeated LbEndpoint lb_endpoints = 1; + } + // Identifies location of where the upstream hosts run. core.v3.Locality locality = 1; // The group of endpoints belonging to the locality specified. + // [#comment:TODO(adisuissa): Once LEDS is implemented this field needs to be + // deprecated and replaced by *load_balancer_endpoints*.] repeated LbEndpoint lb_endpoints = 2; + // [#not-implemented-hide:] + oneof lb_config { + // The group of endpoints belonging to the locality. + // [#comment:TODO(adisuissa): Once LEDS is implemented the *lb_endpoints* field + // needs to be deprecated.] + LbEndpointList load_balancer_endpoints = 7; + + // LEDS Configuration for the current locality. + LedsClusterLocalityConfig leds_cluster_locality_config = 8; + } + // Optional: Per priority/region/zone/sub_zone weight; at least 1. The load // balancing weight for a locality is divided by the sum of the weights of all // localities at the same priority level to produce the effective percentage diff --git a/api/envoy/config/listener/v4alpha/BUILD b/api/envoy/config/listener/v4alpha/BUILD deleted file mode 100644 index 005a92722c4eb..0000000000000 --- a/api/envoy/config/listener/v4alpha/BUILD +++ /dev/null @@ -1,16 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/accesslog/v4alpha:pkg", - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/listener/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@com_github_cncf_udpa//xds/core/v3:pkg", - ], -) diff --git a/api/envoy/config/listener/v4alpha/api_listener.proto b/api/envoy/config/listener/v4alpha/api_listener.proto deleted file mode 100644 index 518caf879ad5e..0000000000000 --- a/api/envoy/config/listener/v4alpha/api_listener.proto +++ /dev/null @@ -1,33 +0,0 @@ -syntax = "proto3"; - -package envoy.config.listener.v4alpha; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; -option java_outer_classname = "ApiListenerProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: API listener] - -// Describes a type of API listener, which is used in non-proxy clients. The type of API -// exposed to the non-proxy application depends on the type of API listener. -message ApiListener { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.listener.v3.ApiListener"; - - // The type in this field determines the type of API listener. At present, the following - // types are supported: - // envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager (HTTP) - // envoy.extensions.filters.network.http_connection_manager.v3.EnvoyMobileHttpConnectionManager (HTTP) - // [#next-major-version: In the v3 API, replace this Any field with a oneof containing the - // specific config message for each type of API listener. 
We could not do this in v2 because - // it would have caused circular dependencies for go protos: lds.proto depends on this file, - // and http_connection_manager.proto depends on rds.proto, which is in the same directory as - // lds.proto, so lds.proto cannot depend on this file.] - google.protobuf.Any api_listener = 1; -} diff --git a/api/envoy/config/listener/v4alpha/listener.proto b/api/envoy/config/listener/v4alpha/listener.proto deleted file mode 100644 index e26160cb2a4ae..0000000000000 --- a/api/envoy/config/listener/v4alpha/listener.proto +++ /dev/null @@ -1,317 +0,0 @@ -syntax = "proto3"; - -package envoy.config.listener.v4alpha; - -import "envoy/config/accesslog/v4alpha/accesslog.proto"; -import "envoy/config/core/v4alpha/address.proto"; -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/socket_option.proto"; -import "envoy/config/listener/v4alpha/api_listener.proto"; -import "envoy/config/listener/v4alpha/listener_components.proto"; -import "envoy/config/listener/v4alpha/udp_listener_config.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "xds/core/v3/collection_entry.proto"; - -import "udpa/annotations/security.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; -option java_outer_classname = "ListenerProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Listener configuration] -// Listener :ref:`configuration overview ` - -// Listener list collections. Entries are *Listener* resources or references. 
-// [#not-implemented-hide:] -message ListenerCollection { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.listener.v3.ListenerCollection"; - - repeated xds.core.v3.CollectionEntry entries = 1; -} - -// [#next-free-field: 30] -message Listener { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.Listener"; - - enum DrainType { - // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check - // filter), listener removal/modification, and hot restart. - DEFAULT = 0; - - // Drain in response to listener removal/modification and hot restart. This setting does not - // include /healthcheck/fail. This setting may be desirable if Envoy is hosting both ingress - // and egress listeners. - MODIFY_ONLY = 1; - } - - // [#not-implemented-hide:] - message DeprecatedV1 { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.listener.v3.Listener.DeprecatedV1"; - - // Whether the listener should bind to the port. A listener that doesn't - // bind can only receive connections redirected from other listeners that - // set use_original_dst parameter to true. Default is true. - // - // This is deprecated. Use :ref:`Listener.bind_to_port - // ` - google.protobuf.BoolValue bind_to_port = 1; - } - - // Configuration for listener connection balancing. - message ConnectionBalanceConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.listener.v3.Listener.ConnectionBalanceConfig"; - - // A connection balancer implementation that does exact balancing. This means that a lock is - // held during balancing so that connection counts are nearly exactly balanced between worker - // threads. This is "nearly" exact in the sense that a connection might close in parallel thus - // making the counts incorrect, but this should be rectified on the next accept. 
This balancer - // sacrifices accept throughput for accuracy and should be used when there are a small number of - // connections that rarely cycle (e.g., service mesh gRPC egress). - message ExactBalance { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.listener.v3.Listener.ConnectionBalanceConfig.ExactBalance"; - } - - oneof balance_type { - option (validate.required) = true; - - // If specified, the listener will use the exact connection balancer. - ExactBalance exact_balance = 1; - } - } - - // Configuration for envoy internal listener. All the future internal listener features should be added here. - // [#not-implemented-hide:] - message InternalListenerConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.listener.v3.Listener.InternalListenerConfig"; - } - - reserved 14, 23, 7, 21; - - reserved "deprecated_v1", "reuse_port"; - - // The unique name by which this listener is known. If no name is provided, - // Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically - // updated or removed via :ref:`LDS ` a unique name must be provided. - string name = 1; - - // The address that the listener should listen on. In general, the address must be unique, though - // that is governed by the bind rules of the OS. E.g., multiple listeners can listen on port 0 on - // Linux as the actual port will be allocated by the OS. - core.v4alpha.Address address = 2 [(validate.rules).message = {required: true}]; - - // Optional prefix to use on listener stats. If empty, the stats will be rooted at - // `listener.
.`. If non-empty, stats will be rooted at - // `listener..`. - string stat_prefix = 28; - - // A list of filter chains to consider for this listener. The - // :ref:`FilterChain ` with the most specific - // :ref:`FilterChainMatch ` criteria is used on a - // connection. - // - // Example using SNI for filter chain selection can be found in the - // :ref:`FAQ entry `. - repeated FilterChain filter_chains = 3; - - // If a connection is redirected using *iptables*, the port on which the proxy - // receives it might be different from the original destination address. When this flag is set to - // true, the listener hands off redirected connections to the listener associated with the - // original destination address. If there is no listener associated with the original destination - // address, the connection is handled by the listener that receives it. Defaults to false. - google.protobuf.BoolValue use_original_dst = 4; - - // The default filter chain if none of the filter chain matches. If no default filter chain is supplied, - // the connection will be closed. The filter chain match is ignored in this field. - FilterChain default_filter_chain = 25; - - // Soft limit on size of the listener’s new connection read and write buffers. - // If unspecified, an implementation defined default is applied (1MiB). - google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5 - [(udpa.annotations.security).configure_for_untrusted_downstream = true]; - - // Listener metadata. - core.v4alpha.Metadata metadata = 6; - - // The type of draining to perform at a listener-wide level. - DrainType drain_type = 8; - - // Listener filters have the opportunity to manipulate and augment the connection metadata that - // is used in connection filter chain matching, for example. These filters are run before any in - // :ref:`filter_chains `. 
Order matters as the - // filters are processed sequentially right after a socket has been accepted by the listener, and - // before a connection is created. - // UDP Listener filters can be specified when the protocol in the listener socket address in - // :ref:`protocol ` is :ref:`UDP - // `. - // UDP listeners currently support a single filter. - repeated ListenerFilter listener_filters = 9; - - // The timeout to wait for all listener filters to complete operation. If the timeout is reached, - // the accepted socket is closed without a connection being created unless - // `continue_on_listener_filters_timeout` is set to true. Specify 0 to disable the - // timeout. If not specified, a default timeout of 15s is used. - google.protobuf.Duration listener_filters_timeout = 15; - - // Whether a connection should be created when listener filters timeout. Default is false. - // - // .. attention:: - // - // Some listener filters, such as :ref:`Proxy Protocol filter - // `, should not be used with this option. It will cause - // unexpected behavior when a connection is created. - bool continue_on_listener_filters_timeout = 17; - - // Whether the listener should be set as a transparent socket. - // When this flag is set to true, connections can be redirected to the listener using an - // *iptables* *TPROXY* target, in which case the original source and destination addresses and - // ports are preserved on accepted connections. This flag should be used in combination with - // :ref:`an original_dst ` :ref:`listener filter - // ` to mark the connections' local addresses as - // "restored." This can be used to hand off each redirected connection to another listener - // associated with the connection's destination address. Direct connections to the socket without - // using *TPROXY* cannot be distinguished from connections redirected using *TPROXY* and are - // therefore treated as if they were redirected. 
- // When this flag is set to false, the listener's socket is explicitly reset as non-transparent. - // Setting this flag requires Envoy to run with the *CAP_NET_ADMIN* capability. - // When this flag is not set (default), the socket is not modified, i.e. the transparent option - // is neither set nor reset. - google.protobuf.BoolValue transparent = 10; - - // Whether the listener should set the *IP_FREEBIND* socket option. When this - // flag is set to true, listeners can be bound to an IP address that is not - // configured on the system running Envoy. When this flag is set to false, the - // option *IP_FREEBIND* is disabled on the socket. When this flag is not set - // (default), the socket is not modified, i.e. the option is neither enabled - // nor disabled. - google.protobuf.BoolValue freebind = 11; - - // Additional socket options that may not be present in Envoy source code or - // precompiled binaries. - repeated core.v4alpha.SocketOption socket_options = 13; - - // Whether the listener should accept TCP Fast Open (TFO) connections. - // When this flag is set to a value greater than 0, the option TCP_FASTOPEN is enabled on - // the socket, with a queue length of the specified size - // (see `details in RFC7413 `_). - // When this flag is set to 0, the option TCP_FASTOPEN is disabled on the socket. - // When this flag is not set (default), the socket is not modified, - // i.e. the option is neither enabled nor disabled. - // - // On Linux, the net.ipv4.tcp_fastopen kernel parameter must include flag 0x2 to enable - // TCP_FASTOPEN. - // See `ip-sysctl.txt `_. - // - // On macOS, only values of 0, 1, and unset are valid; other values may result in an error. - // To set the queue length on macOS, set the net.inet.tcp.fastopen_backlog kernel parameter. - google.protobuf.UInt32Value tcp_fast_open_queue_length = 12; - - // Specifies the intended direction of the traffic relative to the local Envoy. 
- // This property is required on Windows for listeners using the original destination filter, - // see :ref:`Original Destination `. - core.v4alpha.TrafficDirection traffic_direction = 16; - - // If the protocol in the listener socket address in :ref:`protocol - // ` is :ref:`UDP - // `, this field specifies UDP - // listener specific configuration. - UdpListenerConfig udp_listener_config = 18; - - // Used to represent an API listener, which is used in non-proxy clients. The type of API - // exposed to the non-proxy application depends on the type of API listener. - // When this field is set, no other field except for :ref:`name` - // should be set. - // - // .. note:: - // - // Currently only one ApiListener can be installed; and it can only be done via bootstrap config, - // not LDS. - // - // [#next-major-version: In the v3 API, instead of this messy approach where the socket - // listener fields are directly in the top-level Listener message and the API listener types - // are in the ApiListener message, the socket listener messages should be in their own message, - // and the top-level Listener should essentially be a oneof that selects between the - // socket listener and the various types of API listener. That way, a given Listener message - // can structurally only contain the fields of the relevant type.] - ApiListener api_listener = 19; - - // The listener's connection balancer configuration, currently only applicable to TCP listeners. - // If no configuration is specified, Envoy will not attempt to balance active connections between - // worker threads. - // - // In the scenario that the listener X redirects all the connections to the listeners Y1 and Y2 - // by setting :ref:`use_original_dst ` in X - // and :ref:`bind_to_port ` to false in Y1 and Y2, - // it is recommended to disable the balance config in listener X to avoid the cost of balancing, and - // enable the balance config in Y1 and Y2 to balance the connections among the workers. 
- ConnectionBalanceConfig connection_balance_config = 20; - - // When this flag is set to true, listeners set the *SO_REUSEPORT* socket option and - // create one socket for each worker thread. This makes inbound connections - // distribute among worker threads roughly evenly in cases where there are a high number - // of connections. When this flag is set to false, all worker threads share one socket. This field - // defaults to true. - // - // .. attention:: - // - // Although this field defaults to true, it has different behavior on different platforms. See - // the following text for more information. - // - // * On Linux, reuse_port is respected for both TCP and UDP listeners. It also works correctly - // with hot restart. - // * On macOS, reuse_port for TCP does not do what it does on Linux. Instead of load balancing, - // the last socket wins and receives all connections/packets. For TCP, reuse_port is force - // disabled and the user is warned. For UDP, it is enabled, but only one worker will receive - // packets. For QUIC/H3, SW routing will send packets to other workers. For "raw" UDP, only - // a single worker will currently receive packets. - // * On Windows, reuse_port for TCP has undefined behavior. It is force disabled and the user - // is warned similar to macOS. It is left enabled for UDP with undefined behavior currently. - google.protobuf.BoolValue enable_reuse_port = 29; - - // Configuration for :ref:`access logs ` - // emitted by this listener. - repeated accesslog.v4alpha.AccessLog access_log = 22; - - // The maximum length a tcp listener's pending connections queue can grow to. If no value is - // provided net.core.somaxconn will be used on Linux and 128 otherwise. - google.protobuf.UInt32Value tcp_backlog_size = 24; - - // Whether the listener should bind to the port. A listener that doesn't - // bind can only receive connections redirected from other listeners that set - // :ref:`use_original_dst ` - // to true. Default is true. 
- google.protobuf.BoolValue bind_to_port = 26; - - // The exclusive listener type and the corresponding config. - // TODO(lambdai): https://github.com/envoyproxy/envoy/issues/15372 - // Will create and add TcpListenerConfig. Will add UdpListenerConfig and ApiListener. - // [#not-implemented-hide:] - oneof listener_specifier { - // Used to represent an internal listener which does not listen on OSI L4 address but can be used by the - // :ref:`envoy cluster ` to create a user space connection to. - // The internal listener acts as a tcp listener. It supports listener filters and network filter chains. - // The internal listener require :ref:`address ` has - // field `envoy_internal_address`. - // - // There are some limitations are derived from the implementation. The known limitations include - // - // * :ref:`ConnectionBalanceConfig ` is not - // allowed because both cluster connection and listener connection must be owned by the same dispatcher. - // * :ref:`tcp_backlog_size ` - // * :ref:`freebind ` - // * :ref:`transparent ` - // [#not-implemented-hide:] - InternalListenerConfig internal_listener = 27; - } -} diff --git a/api/envoy/config/listener/v4alpha/listener_components.proto b/api/envoy/config/listener/v4alpha/listener_components.proto deleted file mode 100644 index 6fc16227542f9..0000000000000 --- a/api/envoy/config/listener/v4alpha/listener_components.proto +++ /dev/null @@ -1,349 +0,0 @@ -syntax = "proto3"; - -package envoy.config.listener.v4alpha; - -import "envoy/config/core/v4alpha/address.proto"; -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/extension.proto"; -import "envoy/type/v3/range.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; 
-option java_outer_classname = "ListenerComponentsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Listener components] -// Listener :ref:`configuration overview ` - -// [#next-free-field: 6] -message Filter { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.Filter"; - - reserved 3, 2; - - reserved "config"; - - // The name of the filter to instantiate. The name must match a - // :ref:`supported filter `. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - oneof config_type { - // Filter specific configuration which depends on the filter being - // instantiated. See the supported filters for further documentation. - // [#extension-category: envoy.filters.network] - google.protobuf.Any typed_config = 4; - - // Configuration source specifier for an extension configuration discovery - // service. In case of a failure and without the default configuration, the - // listener closes the connections. - // [#not-implemented-hide:] - core.v4alpha.ExtensionConfigSource config_discovery = 5; - } -} - -// Specifies the match criteria for selecting a specific filter chain for a -// listener. -// -// In order for a filter chain to be selected, *ALL* of its criteria must be -// fulfilled by the incoming connection, properties of which are set by the -// networking stack and/or listener filters. -// -// The following order applies: -// -// 1. Destination port. -// 2. Destination IP address. -// 3. Server name (e.g. SNI for TLS protocol), -// 4. Transport protocol. -// 5. Application protocols (e.g. ALPN for TLS protocol). -// 6. Directly connected source IP address (this will only be different from the source IP address -// when using a listener filter that overrides the source address, such as the :ref:`Proxy Protocol -// listener filter `). -// 7. Source type (e.g. any, local or external network). -// 8. 
Source IP address. -// 9. Source port. -// -// For criteria that allow ranges or wildcards, the most specific value in any -// of the configured filter chains that matches the incoming connection is going -// to be used (e.g. for SNI ``www.example.com`` the most specific match would be -// ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter -// chain without ``server_names`` requirements). -// -// A different way to reason about the filter chain matches: -// Suppose there exists N filter chains. Prune the filter chain set using the above 8 steps. -// In each step, filter chains which most specifically matches the attributes continue to the next step. -// The listener guarantees at most 1 filter chain is left after all of the steps. -// -// Example: -// -// For destination port, filter chains specifying the destination port of incoming traffic are the -// most specific match. If none of the filter chains specifies the exact destination port, the filter -// chains which do not specify ports are the most specific match. Filter chains specifying the -// wrong port can never be the most specific match. -// -// [#comment: Implemented rules are kept in the preference order, with deprecated fields -// listed at the end, because that's how we want to list them in the docs. -// -// [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules] -// [#next-free-field: 14] -message FilterChainMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.listener.v3.FilterChainMatch"; - - enum ConnectionSourceType { - // Any connection source matches. - ANY = 0; - - // Match a connection originating from the same host. - SAME_IP_OR_LOOPBACK = 1; - - // Match a connection originating from a different host. - EXTERNAL = 2; - } - - reserved 1; - - // Optional destination port to consider when use_original_dst is set on the - // listener in determining a filter chain match. 
- google.protobuf.UInt32Value destination_port = 8 [(validate.rules).uint32 = {lte: 65535 gte: 1}]; - - // If non-empty, an IP address and prefix length to match addresses when the - // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. - repeated core.v4alpha.CidrRange prefix_ranges = 3; - - // If non-empty, an IP address and suffix length to match addresses when the - // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. - // [#not-implemented-hide:] - string address_suffix = 4; - - // [#not-implemented-hide:] - google.protobuf.UInt32Value suffix_len = 5; - - // The criteria is satisfied if the directly connected source IP address of the downstream - // connection is contained in at least one of the specified subnets. If the parameter is not - // specified or the list is empty, the directly connected source IP address is ignored. - repeated core.v4alpha.CidrRange direct_source_prefix_ranges = 13; - - // Specifies the connection source IP match type. Can be any, local or external network. - ConnectionSourceType source_type = 12 [(validate.rules).enum = {defined_only: true}]; - - // The criteria is satisfied if the source IP address of the downstream - // connection is contained in at least one of the specified subnets. If the - // parameter is not specified or the list is empty, the source IP address is - // ignored. - repeated core.v4alpha.CidrRange source_prefix_ranges = 6; - - // The criteria is satisfied if the source port of the downstream connection - // is contained in at least one of the specified ports. If the parameter is - // not specified, the source port is ignored. - repeated uint32 source_ports = 7 - [(validate.rules).repeated = {items {uint32 {lte: 65535 gte: 1}}}]; - - // If non-empty, a list of server names (e.g. SNI for TLS protocol) to consider when determining - // a filter chain match. 
Those values will be compared against the server names of a new - // connection, when detected by one of the listener filters. - // - // The server name will be matched against all wildcard domains, i.e. ``www.example.com`` - // will be first matched against ``www.example.com``, then ``*.example.com``, then ``*.com``. - // - // Note that partial wildcards are not supported, and values like ``*w.example.com`` are invalid. - // - // .. attention:: - // - // See the :ref:`FAQ entry ` on how to configure SNI for more - // information. - repeated string server_names = 11; - - // If non-empty, a transport protocol to consider when determining a filter chain match. - // This value will be compared against the transport protocol of a new connection, when - // it's detected by one of the listener filters. - // - // Suggested values include: - // - // * ``raw_buffer`` - default, used when no transport protocol is detected, - // * ``tls`` - set by :ref:`envoy.filters.listener.tls_inspector ` - // when TLS protocol is detected. - string transport_protocol = 9; - - // If non-empty, a list of application protocols (e.g. ALPN for TLS protocol) to consider when - // determining a filter chain match. Those values will be compared against the application - // protocols of a new connection, when detected by one of the listener filters. - // - // Suggested values include: - // - // * ``http/1.1`` - set by :ref:`envoy.filters.listener.tls_inspector - // `, - // * ``h2`` - set by :ref:`envoy.filters.listener.tls_inspector ` - // - // .. attention:: - // - // Currently, only :ref:`TLS Inspector ` provides - // application protocol detection based on the requested - // `ALPN `_ values. - // - // However, the use of ALPN is pretty much limited to the HTTP/2 traffic on the Internet, - // and matching on values other than ``h2`` is going to lead to a lot of false negatives, - // unless all connecting clients are known to use ALPN. 
- repeated string application_protocols = 10; -} - -// A filter chain wraps a set of match criteria, an option TLS context, a set of filters, and -// various other parameters. -// [#next-free-field: 10] -message FilterChain { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.listener.v3.FilterChain"; - - // The configuration for on-demand filter chain. If this field is not empty in FilterChain message, - // a filter chain will be built on-demand. - // On-demand filter chains help speedup the warming up of listeners since the building and initialization of - // an on-demand filter chain will be postponed to the arrival of new connection requests that require this filter chain. - // Filter chains that are not often used can be set as on-demand. - message OnDemandConfiguration { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.listener.v3.FilterChain.OnDemandConfiguration"; - - // The timeout to wait for filter chain placeholders to complete rebuilding. - // 1. If this field is set to 0, timeout is disabled. - // 2. If not specified, a default timeout of 15s is used. - // Rebuilding will wait until dependencies are ready, have failed, or this timeout is reached. - // Upon failure or timeout, all connections related to this filter chain will be closed. - // Rebuilding will start again on the next new connection. - google.protobuf.Duration rebuild_timeout = 1; - } - - reserved 2, 4; - - reserved "tls_context", "use_proxy_proto"; - - // The criteria to use when matching a connection to this filter chain. - FilterChainMatch filter_chain_match = 1; - - // A list of individual network filters that make up the filter chain for - // connections established with the listener. Order matters as the filters are - // processed sequentially as connection events happen. Note: If the filter - // list is empty, the connection will close by default. 
- repeated Filter filters = 3; - - // [#not-implemented-hide:] filter chain metadata. - core.v4alpha.Metadata metadata = 5; - - // Optional custom transport socket implementation to use for downstream connections. - // To setup TLS, set a transport socket with name `envoy.transport_sockets.tls` and - // :ref:`DownstreamTlsContext ` in the `typed_config`. - // If no transport socket configuration is specified, new connections - // will be set up with plaintext. - // [#extension-category: envoy.transport_sockets.downstream] - core.v4alpha.TransportSocket transport_socket = 6; - - // If present and nonzero, the amount of time to allow incoming connections to complete any - // transport socket negotiations. If this expires before the transport reports connection - // establishment, the connection is summarily closed. - google.protobuf.Duration transport_socket_connect_timeout = 9; - - // [#not-implemented-hide:] The unique name (or empty) by which this filter chain is known. If no - // name is provided, Envoy will allocate an internal UUID for the filter chain. If the filter - // chain is to be dynamically updated or removed via FCDS a unique name must be provided. - string name = 7; - - // [#not-implemented-hide:] The configuration to specify whether the filter chain will be built on-demand. - // If this field is not empty, the filter chain will be built on-demand. - // Otherwise, the filter chain will be built normally and block listener warming. - OnDemandConfiguration on_demand_configuration = 8; -} - -// Listener filter chain match configuration. This is a recursive structure which allows complex -// nested match configurations to be built using various logical operators. -// -// Examples: -// -// * Matches if the destination port is 3306. -// -// .. code-block:: yaml -// -// destination_port_range: -// start: 3306 -// end: 3307 -// -// * Matches if the destination port is 3306 or 15000. -// -// .. 
code-block:: yaml -// -// or_match: -// rules: -// - destination_port_range: -// start: 3306 -// end: 3307 -// - destination_port_range: -// start: 15000 -// end: 15001 -// -// [#next-free-field: 6] -message ListenerFilterChainMatchPredicate { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.listener.v3.ListenerFilterChainMatchPredicate"; - - // A set of match configurations used for logical operations. - message MatchSet { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.listener.v3.ListenerFilterChainMatchPredicate.MatchSet"; - - // The list of rules that make up the set. - repeated ListenerFilterChainMatchPredicate rules = 1 - [(validate.rules).repeated = {min_items: 2}]; - } - - oneof rule { - option (validate.required) = true; - - // A set that describes a logical OR. If any member of the set matches, the match configuration - // matches. - MatchSet or_match = 1; - - // A set that describes a logical AND. If all members of the set match, the match configuration - // matches. - MatchSet and_match = 2; - - // A negation match. The match configuration will match if the negated match condition matches. - ListenerFilterChainMatchPredicate not_match = 3; - - // The match configuration will always match. - bool any_match = 4 [(validate.rules).bool = {const: true}]; - - // Match destination port. Particularly, the match evaluation must use the recovered local port if - // the owning listener filter is after :ref:`an original_dst listener filter `. - type.v3.Int32Range destination_port_range = 5; - } -} - -message ListenerFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.listener.v3.ListenerFilter"; - - reserved 2; - - reserved "config"; - - // The name of the filter to instantiate. The name must match a - // :ref:`supported filter `. 
- string name = 1 [(validate.rules).string = {min_len: 1}]; - - oneof config_type { - // Filter specific configuration which depends on the filter being - // instantiated. See the supported filters for further documentation. - // [#extension-category: envoy.filters.listener,envoy.filters.udp_listener] - google.protobuf.Any typed_config = 3; - } - - // Optional match predicate used to disable the filter. The filter is enabled when this field is empty. - // See :ref:`ListenerFilterChainMatchPredicate ` - // for further examples. - ListenerFilterChainMatchPredicate filter_disabled = 4; -} diff --git a/api/envoy/config/listener/v4alpha/quic_config.proto b/api/envoy/config/listener/v4alpha/quic_config.proto deleted file mode 100644 index 0b6d6bd7584ce..0000000000000 --- a/api/envoy/config/listener/v4alpha/quic_config.proto +++ /dev/null @@ -1,62 +0,0 @@ -syntax = "proto3"; - -package envoy.config.listener.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/extension.proto"; -import "envoy/config/core/v4alpha/protocol.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; -option java_outer_classname = "QuicConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: QUIC listener config] - -// Configuration specific to the UDP QUIC listener. -// [#next-free-field: 8] -message QuicProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.listener.v3.QuicProtocolOptions"; - - core.v4alpha.QuicProtocolOptions quic_protocol_options = 1; - - // Maximum number of milliseconds that connection will be alive when there is - // no network activity. 
300000ms if not specified. - google.protobuf.Duration idle_timeout = 2; - - // Connection timeout in milliseconds before the crypto handshake is finished. - // 20000ms if not specified. - google.protobuf.Duration crypto_handshake_timeout = 3; - - // Runtime flag that controls whether the listener is enabled or not. If not specified, defaults - // to enabled. - core.v4alpha.RuntimeFeatureFlag enabled = 4; - - // A multiplier to number of connections which is used to determine how many packets to read per - // event loop. A reasonable number should allow the listener to process enough payload but not - // starve TCP and other UDP sockets and also prevent long event loop duration. - // The default value is 32. This means if there are N QUIC connections, the total number of - // packets to read in each read event will be 32 * N. - // The actual number of packets to read in total by the UDP listener is also - // bound by 6000, regardless of this field or how many connections there are. - google.protobuf.UInt32Value packets_to_read_to_connection_count_ratio = 5 - [(validate.rules).uint32 = {gte: 1}]; - - // Configure which implementation of `quic::QuicCryptoClientStreamBase` to be used for this listener. - // If not specified the :ref:`QUICHE default one configured by ` will be used. - // [#extension-category: envoy.quic.server.crypto_stream] - core.v4alpha.TypedExtensionConfig crypto_stream_config = 6; - - // Configure which implementation of `quic::ProofSource` to be used for this listener. - // If not specified the :ref:`default one configured by ` will be used. 
- // [#extension-category: envoy.quic.proof_source] - core.v4alpha.TypedExtensionConfig proof_source_config = 7; -} diff --git a/api/envoy/config/listener/v4alpha/udp_listener_config.proto b/api/envoy/config/listener/v4alpha/udp_listener_config.proto deleted file mode 100644 index 3cd272de3172e..0000000000000 --- a/api/envoy/config/listener/v4alpha/udp_listener_config.proto +++ /dev/null @@ -1,46 +0,0 @@ -syntax = "proto3"; - -package envoy.config.listener.v4alpha; - -import "envoy/config/core/v4alpha/udp_socket_config.proto"; -import "envoy/config/listener/v4alpha/quic_config.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; -option java_outer_classname = "UdpListenerConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: UDP listener config] -// Listener :ref:`configuration overview ` - -// [#next-free-field: 8] -message UdpListenerConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.listener.v3.UdpListenerConfig"; - - reserved 1, 2, 3, 4, 6; - - reserved "config"; - - // UDP socket configuration for the listener. The default for - // :ref:`prefer_gro ` is false for - // listener sockets. If receiving a large amount of datagrams from a small number of sources, it - // may be worthwhile to enable this option after performance testing. - core.v4alpha.UdpSocketConfig downstream_socket_config = 5; - - // Configuration for QUIC protocol. If empty, QUIC will not be enabled on this listener. Set - // to the default object to enable QUIC without modifying any additional options. - // - // .. warning:: - // QUIC support is currently alpha and should be used with caution. Please - // see :ref:`here ` for details. 
- QuicProtocolOptions quic_options = 7; -} - -message ActiveRawUdpListenerConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.listener.v3.ActiveRawUdpListenerConfig"; -} diff --git a/api/envoy/config/metrics/v3/metrics_service.proto b/api/envoy/config/metrics/v3/metrics_service.proto index 1cdd6d183e9db..df3c71e6a6308 100644 --- a/api/envoy/config/metrics/v3/metrics_service.proto +++ b/api/envoy/config/metrics/v3/metrics_service.proto @@ -21,6 +21,17 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Metrics Service is configured as a built-in *envoy.stat_sinks.metrics_service* :ref:`StatsSink // `. This opaque configuration will be used to create // Metrics Service. +// +// Example: +// +// .. code-block:: yaml +// +// stats_sinks: +// - name: envoy.stat_sinks.metrics_service +// typed_config: +// "@type": type.googleapis.com/envoy.config.metrics.v3.MetricsServiceConfig +// transport_api_version: V3 +// // [#extension: envoy.stat_sinks.metrics_service] message MetricsServiceConfig { option (udpa.annotations.versioning).previous_message_type = diff --git a/api/envoy/config/metrics/v4alpha/BUILD b/api/envoy/config/metrics/v4alpha/BUILD deleted file mode 100644 index 9f8473e290ae3..0000000000000 --- a/api/envoy/config/metrics/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/metrics/v3:pkg", - "//envoy/type/matcher/v4alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/config/metrics/v4alpha/metrics_service.proto b/api/envoy/config/metrics/v4alpha/metrics_service.proto deleted file mode 100644 index fe530b34e6908..0000000000000 --- a/api/envoy/config/metrics/v4alpha/metrics_service.proto +++ /dev/null @@ -1,46 +0,0 @@ -syntax = "proto3"; - -package envoy.config.metrics.v4alpha; - -import "envoy/config/core/v4alpha/config_source.proto"; -import "envoy/config/core/v4alpha/grpc_service.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.metrics.v4alpha"; -option java_outer_classname = "MetricsServiceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Metrics service] - -// Metrics Service is configured as a built-in *envoy.stat_sinks.metrics_service* :ref:`StatsSink -// `. This opaque configuration will be used to create -// Metrics Service. -// [#extension: envoy.stat_sinks.metrics_service] -message MetricsServiceConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.metrics.v3.MetricsServiceConfig"; - - // The upstream gRPC cluster that hosts the metrics service. - core.v4alpha.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; - - // API version for metric service transport protocol. This describes the metric service gRPC - // endpoint and version of messages used on the wire. 
- core.v4alpha.ApiVersion transport_api_version = 3 [(validate.rules).enum = {defined_only: true}]; - - // If true, counters are reported as the delta between flushing intervals. Otherwise, the current - // counter value is reported. Defaults to false. - // Eventually (https://github.com/envoyproxy/envoy/issues/10968) if this value is not set, the - // sink will take updates from the :ref:`MetricsResponse `. - google.protobuf.BoolValue report_counters_as_deltas = 2; - - // If true, metrics will have their tags emitted as labels on the metrics objects sent to the MetricsService, - // and the tag extracted name will be used instead of the full name, which may contain values used by the tag - // extractor or additional tags added during stats creation. - bool emit_tags_as_labels = 4; -} diff --git a/api/envoy/config/metrics/v4alpha/stats.proto b/api/envoy/config/metrics/v4alpha/stats.proto deleted file mode 100644 index 6d8a94050d65a..0000000000000 --- a/api/envoy/config/metrics/v4alpha/stats.proto +++ /dev/null @@ -1,411 +0,0 @@ -syntax = "proto3"; - -package envoy.config.metrics.v4alpha; - -import "envoy/config/core/v4alpha/address.proto"; -import "envoy/type/matcher/v4alpha/string.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.metrics.v4alpha"; -option java_outer_classname = "StatsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Stats] -// Statistics :ref:`architecture overview `. - -// Configuration for pluggable stats sinks. -message StatsSink { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v3.StatsSink"; - - reserved 2; - - reserved "config"; - - // The name of the stats sink to instantiate. 
The name must match a supported - // stats sink. - // See the :ref:`extensions listed in typed_config below ` for the default list of available stats sink. - // Sinks optionally support tagged/multiple dimensional metrics. - string name = 1; - - // Stats sink specific configuration which depends on the sink being instantiated. See - // :ref:`StatsdSink ` for an example. - // [#extension-category: envoy.stats_sinks] - oneof config_type { - google.protobuf.Any typed_config = 3; - } -} - -// Statistics configuration such as tagging. -message StatsConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.metrics.v3.StatsConfig"; - - // Each stat name is iteratively processed through these tag specifiers. - // When a tag is matched, the first capture group is removed from the name so - // later :ref:`TagSpecifiers ` cannot match that - // same portion of the match. - repeated TagSpecifier stats_tags = 1; - - // Use all default tag regexes specified in Envoy. These can be combined with - // custom tags specified in :ref:`stats_tags - // `. They will be processed before - // the custom tags. - // - // .. note:: - // - // If any default tags are specified twice, the config will be considered - // invalid. - // - // See :repo:`well_known_names.h ` for a list of the - // default tags in Envoy. - // - // If not provided, the value is assumed to be true. - google.protobuf.BoolValue use_all_default_tags = 2; - - // Inclusion/exclusion matcher for stat name creation. If not provided, all stats are instantiated - // as normal. Preventing the instantiation of certain families of stats can improve memory - // performance for Envoys running especially large configs. - // - // .. warning:: - // Excluding stats may affect Envoy's behavior in undocumented ways. See - // `issue #8771 `_ for more information. - // If any unexpected behavior changes are observed, please open a new issue immediately. 
- StatsMatcher stats_matcher = 3; - - // Defines rules for setting the histogram buckets. Rules are evaluated in order, and the first - // match is applied. If no match is found (or if no rules are set), the following default buckets - // are used: - // - // .. code-block:: json - // - // [ - // 0.5, - // 1, - // 5, - // 10, - // 25, - // 50, - // 100, - // 250, - // 500, - // 1000, - // 2500, - // 5000, - // 10000, - // 30000, - // 60000, - // 300000, - // 600000, - // 1800000, - // 3600000 - // ] - repeated HistogramBucketSettings histogram_bucket_settings = 4; -} - -// Configuration for disabling stat instantiation. -message StatsMatcher { - // The instantiation of stats is unrestricted by default. If the goal is to configure Envoy to - // instantiate all stats, there is no need to construct a StatsMatcher. - // - // However, StatsMatcher can be used to limit the creation of families of stats in order to - // conserve memory. Stats can either be disabled entirely, or they can be - // limited by either an exclusion or an inclusion list of :ref:`StringMatcher - // ` protos: - // - // * If `reject_all` is set to `true`, no stats will be instantiated. If `reject_all` is set to - // `false`, all stats will be instantiated. - // - // * If an exclusion list is supplied, any stat name matching *any* of the StringMatchers in the - // list will not instantiate. - // - // * If an inclusion list is supplied, no stats will instantiate, except those matching *any* of - // the StringMatchers in the list. - // - // - // A StringMatcher can be used to match against an exact string, a suffix / prefix, or a regex. - // **NB:** For performance reasons, it is highly recommended to use a prefix- or suffix-based - // matcher rather than a regex-based matcher. - // - // Example 1. Excluding all stats. - // - // .. code-block:: json - // - // { - // "statsMatcher": { - // "rejectAll": "true" - // } - // } - // - // Example 2. 
Excluding all cluster-specific stats, but not cluster-manager stats: - // - // .. code-block:: json - // - // { - // "statsMatcher": { - // "exclusionList": { - // "patterns": [ - // { - // "prefix": "cluster." - // } - // ] - // } - // } - // } - // - // Example 3. Including only manager-related stats: - // - // .. code-block:: json - // - // { - // "statsMatcher": { - // "inclusionList": { - // "patterns": [ - // { - // "prefix": "cluster_manager." - // }, - // { - // "prefix": "listener_manager." - // } - // ] - // } - // } - // } - // - - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.metrics.v3.StatsMatcher"; - - oneof stats_matcher { - option (validate.required) = true; - - // If `reject_all` is true, then all stats are disabled. If `reject_all` is false, then all - // stats are enabled. - bool reject_all = 1; - - // Exclusive match. All stats are enabled except for those matching one of the supplied - // StringMatcher protos. - type.matcher.v4alpha.ListStringMatcher exclusion_list = 2; - - // Inclusive match. No stats are enabled except for those matching one of the supplied - // StringMatcher protos. - type.matcher.v4alpha.ListStringMatcher inclusion_list = 3; - } -} - -// Designates a tag name and value pair. The value may be either a fixed value -// or a regex providing the value via capture groups. The specified tag will be -// unconditionally set if a fixed value, otherwise it will only be set if one -// or more capture groups in the regex match. -message TagSpecifier { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.metrics.v3.TagSpecifier"; - - // Attaches an identifier to the tag values to identify the tag being in the - // sink. Envoy has a set of default names and regexes to extract dynamic - // portions of existing stats, which can be found in :repo:`well_known_names.h - // ` in the Envoy repository. 
If a :ref:`tag_name - // ` is provided in the config and - // neither :ref:`regex ` or - // :ref:`fixed_value ` were specified, - // Envoy will attempt to find that name in its set of defaults and use the accompanying regex. - // - // .. note:: - // - // It is invalid to specify the same tag name twice in a config. - string tag_name = 1; - - oneof tag_value { - // Designates a tag to strip from the tag extracted name and provide as a named - // tag value for all statistics. This will only occur if any part of the name - // matches the regex provided with one or more capture groups. - // - // The first capture group identifies the portion of the name to remove. The - // second capture group (which will normally be nested inside the first) will - // designate the value of the tag for the statistic. If no second capture - // group is provided, the first will also be used to set the value of the tag. - // All other capture groups will be ignored. - // - // Example 1. a stat name ``cluster.foo_cluster.upstream_rq_timeout`` and - // one tag specifier: - // - // .. code-block:: json - // - // { - // "tag_name": "envoy.cluster_name", - // "regex": "^cluster\\.((.+?)\\.)" - // } - // - // Note that the regex will remove ``foo_cluster.`` making the tag extracted - // name ``cluster.upstream_rq_timeout`` and the tag value for - // ``envoy.cluster_name`` will be ``foo_cluster`` (note: there will be no - // ``.`` character because of the second capture group). - // - // Example 2. a stat name - // ``http.connection_manager_1.user_agent.ios.downstream_cx_total`` and two - // tag specifiers: - // - // .. code-block:: json - // - // [ - // { - // "tag_name": "envoy.http_user_agent", - // "regex": "^http(?=\\.).*?\\.user_agent\\.((.+?)\\.)\\w+?$" - // }, - // { - // "tag_name": "envoy.http_conn_manager_prefix", - // "regex": "^http\\.((.*?)\\.)" - // } - // ] - // - // The two regexes of the specifiers will be processed in the definition order. 
- // - // The first regex will remove ``ios.``, leaving the tag extracted name - // ``http.connection_manager_1.user_agent.downstream_cx_total``. The tag - // ``envoy.http_user_agent`` will be added with tag value ``ios``. - // - // The second regex will remove ``connection_manager_1.`` from the tag - // extracted name produced by the first regex - // ``http.connection_manager_1.user_agent.downstream_cx_total``, leaving - // ``http.user_agent.downstream_cx_total`` as the tag extracted name. The tag - // ``envoy.http_conn_manager_prefix`` will be added with the tag value - // ``connection_manager_1``. - string regex = 2 [(validate.rules).string = {max_bytes: 1024}]; - - // Specifies a fixed tag value for the ``tag_name``. - string fixed_value = 3; - } -} - -// Specifies a matcher for stats and the buckets that matching stats should use. -message HistogramBucketSettings { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.metrics.v3.HistogramBucketSettings"; - - // The stats that this rule applies to. The match is applied to the original stat name - // before tag-extraction, for example `cluster.exampleclustername.upstream_cx_length_ms`. - type.matcher.v4alpha.StringMatcher match = 1 [(validate.rules).message = {required: true}]; - - // Each value is the upper bound of a bucket. Each bucket must be greater than 0 and unique. - // The order of the buckets does not matter. - repeated double buckets = 2 [(validate.rules).repeated = { - min_items: 1 - unique: true - items {double {gt: 0.0}} - }]; -} - -// Stats configuration proto schema for built-in *envoy.stat_sinks.statsd* sink. This sink does not support -// tagged metrics. -// [#extension: envoy.stat_sinks.statsd] -message StatsdSink { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v3.StatsdSink"; - - oneof statsd_specifier { - option (validate.required) = true; - - // The UDP address of a running `statsd `_ - // compliant listener. 
If specified, statistics will be flushed to this - // address. - core.v4alpha.Address address = 1; - - // The name of a cluster that is running a TCP `statsd - // `_ compliant listener. If specified, - // Envoy will connect to this cluster to flush statistics. - string tcp_cluster_name = 2; - } - - // Optional custom prefix for StatsdSink. If - // specified, this will override the default prefix. - // For example: - // - // .. code-block:: json - // - // { - // "prefix" : "envoy-prod" - // } - // - // will change emitted stats to - // - // .. code-block:: cpp - // - // envoy-prod.test_counter:1|c - // envoy-prod.test_timer:5|ms - // - // Note that the default prefix, "envoy", will be used if a prefix is not - // specified. - // - // Stats with default prefix: - // - // .. code-block:: cpp - // - // envoy.test_counter:1|c - // envoy.test_timer:5|ms - string prefix = 3; -} - -// Stats configuration proto schema for built-in *envoy.stat_sinks.dog_statsd* sink. -// The sink emits stats with `DogStatsD `_ -// compatible tags. Tags are configurable via :ref:`StatsConfig -// `. -// [#extension: envoy.stat_sinks.dog_statsd] -message DogStatsdSink { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.metrics.v3.DogStatsdSink"; - - reserved 2; - - oneof dog_statsd_specifier { - option (validate.required) = true; - - // The UDP address of a running DogStatsD compliant listener. If specified, - // statistics will be flushed to this address. - core.v4alpha.Address address = 1; - } - - // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field - // ` for more details. - string prefix = 3; - - // Optional max datagram size to use when sending UDP messages. By default Envoy - // will emit one metric per datagram. By specifying a max-size larger than a single - // metric, Envoy will emit multiple, new-line separated metrics. The max datagram - // size should not exceed your network's MTU. 
- // - // Note that this value may not be respected if smaller than a single metric. - google.protobuf.UInt64Value max_bytes_per_datagram = 4 [(validate.rules).uint64 = {gt: 0}]; -} - -// Stats configuration proto schema for built-in *envoy.stat_sinks.hystrix* sink. -// The sink emits stats in `text/event-stream -// `_ -// formatted stream for use by `Hystrix dashboard -// `_. -// -// Note that only a single HystrixSink should be configured. -// -// Streaming is started through an admin endpoint :http:get:`/hystrix_event_stream`. -// [#extension: envoy.stat_sinks.hystrix] -message HystrixSink { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.metrics.v3.HystrixSink"; - - // The number of buckets the rolling statistical window is divided into. - // - // Each time the sink is flushed, all relevant Envoy statistics are sampled and - // added to the rolling window (removing the oldest samples in the window - // in the process). The sink then outputs the aggregate statistics across the - // current rolling window to the event stream(s). - // - // rolling_window(ms) = stats_flush_interval(ms) * num_of_buckets - // - // More detailed explanation can be found in `Hystrix wiki - // `_. - int64 num_buckets = 1; -} diff --git a/api/envoy/config/overload/v3/overload.proto b/api/envoy/config/overload/v3/overload.proto index 4445af6321137..85fa761dbdd8e 100644 --- a/api/envoy/config/overload/v3/overload.proto +++ b/api/envoy/config/overload/v3/overload.proto @@ -141,6 +141,26 @@ message OverloadAction { google.protobuf.Any typed_config = 3; } +// Configuration for which accounts the WatermarkBuffer Factories should +// track. +message BufferFactoryConfig { + // The minimum power of two at which Envoy starts tracking an account. + // + // Envoy has 8 power of two buckets starting with the provided exponent below. 
+ // Concretely the 1st bucket contains accounts for streams that use + // [2^minimum_account_to_track_power_of_two, + // 2^(minimum_account_to_track_power_of_two + 1)) bytes. + // With the 8th bucket tracking accounts + // >= 128 * 2^minimum_account_to_track_power_of_two. + // + // The maximum value is 56, since we're using uint64_t for bytes counting, + // and that's the last value that would use the 8 buckets. In practice, + // we don't expect the proxy to be holding 2^56 bytes. + // + // If omitted, Envoy should not do any tracking. + uint32 minimum_account_to_track_power_of_two = 1 [(validate.rules).uint32 = {lte: 56 gte: 10}]; +} + message OverloadManager { option (udpa.annotations.versioning).previous_message_type = "envoy.config.overload.v2alpha.OverloadManager"; @@ -153,4 +173,7 @@ message OverloadManager { // The set of overload actions. repeated OverloadAction actions = 3; + + // Configuration for buffer factory. + BufferFactoryConfig buffer_factory_config = 4; } diff --git a/api/envoy/config/ratelimit/v4alpha/BUILD b/api/envoy/config/ratelimit/v4alpha/BUILD deleted file mode 100644 index f335ebe20e6b2..0000000000000 --- a/api/envoy/config/ratelimit/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/ratelimit/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/config/ratelimit/v4alpha/rls.proto b/api/envoy/config/ratelimit/v4alpha/rls.proto deleted file mode 100644 index 7a13efd7395e4..0000000000000 --- a/api/envoy/config/ratelimit/v4alpha/rls.proto +++ /dev/null @@ -1,34 +0,0 @@ -syntax = "proto3"; - -package envoy.config.ratelimit.v4alpha; - -import "envoy/config/core/v4alpha/config_source.proto"; -import "envoy/config/core/v4alpha/grpc_service.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.ratelimit.v4alpha"; -option java_outer_classname = "RlsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Rate limit service] - -// Rate limit :ref:`configuration overview `. -message RateLimitServiceConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.ratelimit.v3.RateLimitServiceConfig"; - - reserved 1, 3; - - // Specifies the gRPC service that hosts the rate limit service. The client - // will connect to this cluster when it needs to make rate limit service - // requests. - core.v4alpha.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; - - // API version for rate limit transport protocol. This describes the rate limit gRPC endpoint and - // version of messages used on the wire. 
- core.v4alpha.ApiVersion transport_api_version = 4 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/api/envoy/config/rbac/v3/BUILD b/api/envoy/config/rbac/v3/BUILD index c5246439c7b55..c289def1f11d2 100644 --- a/api/envoy/config/rbac/v3/BUILD +++ b/api/envoy/config/rbac/v3/BUILD @@ -10,6 +10,7 @@ api_proto_package( "//envoy/config/core/v3:pkg", "//envoy/config/route/v3:pkg", "//envoy/type/matcher/v3:pkg", + "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto", "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", diff --git a/api/envoy/config/rbac/v3/rbac.proto b/api/envoy/config/rbac/v3/rbac.proto index 44b3cf7cee6ec..d66f9be2b4981 100644 --- a/api/envoy/config/rbac/v3/rbac.proto +++ b/api/envoy/config/rbac/v3/rbac.proto @@ -7,6 +7,7 @@ import "envoy/config/route/v3/route_components.proto"; import "envoy/type/matcher/v3/metadata.proto"; import "envoy/type/matcher/v3/path.proto"; import "envoy/type/matcher/v3/string.proto"; +import "envoy/type/v3/range.proto"; import "google/api/expr/v1alpha1/checked.proto"; import "google/api/expr/v1alpha1/syntax.proto"; @@ -145,7 +146,7 @@ message Policy { } // Permission defines an action (or actions) that a principal can take. -// [#next-free-field: 11] +// [#next-free-field: 12] message Permission { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Permission"; @@ -185,6 +186,9 @@ message Permission { // A port number that describes the destination port connecting to. uint32 destination_port = 6 [(validate.rules).uint32 = {lte: 65535}]; + // A port number range that describes a range of destination ports connecting to. + type.v3.Int32Range destination_port_range = 11; + // Metadata that describes additional information about the action. 
type.matcher.v3.MetadataMatcher metadata = 7; diff --git a/api/envoy/config/rbac/v4alpha/BUILD b/api/envoy/config/rbac/v4alpha/BUILD deleted file mode 100644 index f5683a61a2867..0000000000000 --- a/api/envoy/config/rbac/v4alpha/BUILD +++ /dev/null @@ -1,17 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/rbac/v3:pkg", - "//envoy/config/route/v4alpha:pkg", - "//envoy/type/matcher/v4alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto", - "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", - ], -) diff --git a/api/envoy/config/rbac/v4alpha/rbac.proto b/api/envoy/config/rbac/v4alpha/rbac.proto deleted file mode 100644 index bd56c0c3dc326..0000000000000 --- a/api/envoy/config/rbac/v4alpha/rbac.proto +++ /dev/null @@ -1,299 +0,0 @@ -syntax = "proto3"; - -package envoy.config.rbac.v4alpha; - -import "envoy/config/core/v4alpha/address.proto"; -import "envoy/config/route/v4alpha/route_components.proto"; -import "envoy/type/matcher/v4alpha/metadata.proto"; -import "envoy/type/matcher/v4alpha/path.proto"; -import "envoy/type/matcher/v4alpha/string.proto"; - -import "google/api/expr/v1alpha1/checked.proto"; -import "google/api/expr/v1alpha1/syntax.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.rbac.v4alpha"; -option java_outer_classname = "RbacProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Role Based Access Control (RBAC)] - -// Role Based Access Control (RBAC) provides service-level and method-level access control 
for a -// service. Requests are allowed or denied based on the `action` and whether a matching policy is -// found. For instance, if the action is ALLOW and a matching policy is found the request should be -// allowed. -// -// RBAC can also be used to make access logging decisions by communicating with access loggers -// through dynamic metadata. When the action is LOG and at least one policy matches, the -// `access_log_hint` value in the shared key namespace 'envoy.common' is set to `true` indicating -// the request should be logged. -// -// Here is an example of RBAC configuration. It has two policies: -// -// * Service account "cluster.local/ns/default/sa/admin" has full access to the service, and so -// does "cluster.local/ns/default/sa/superuser". -// -// * Any user can read ("GET") the service at paths with prefix "/products", so long as the -// destination port is either 80 or 443. -// -// .. code-block:: yaml -// -// action: ALLOW -// policies: -// "service-admin": -// permissions: -// - any: true -// principals: -// - authenticated: -// principal_name: -// exact: "cluster.local/ns/default/sa/admin" -// - authenticated: -// principal_name: -// exact: "cluster.local/ns/default/sa/superuser" -// "product-viewer": -// permissions: -// - and_rules: -// rules: -// - header: -// name: ":method" -// string_match: -// exact: "GET" -// - url_path: -// path: { prefix: "/products" } -// - or_rules: -// rules: -// - destination_port: 80 -// - destination_port: 443 -// principals: -// - any: true -// -message RBAC { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v3.RBAC"; - - // Should we do safe-list or block-list style access control? - enum Action { - // The policies grant access to principals. The rest are denied. This is safe-list style - // access control. This is the default type. - ALLOW = 0; - - // The policies deny access to principals. The rest are allowed. This is block-list style - // access control. 
- DENY = 1; - - // The policies set the `access_log_hint` dynamic metadata key based on if requests match. - // All requests are allowed. - LOG = 2; - } - - // The action to take if a policy matches. Every action either allows or denies a request, - // and can also carry out action-specific operations. - // - // Actions: - // - // * ALLOW: Allows the request if and only if there is a policy that matches - // the request. - // * DENY: Allows the request if and only if there are no policies that - // match the request. - // * LOG: Allows all requests. If at least one policy matches, the dynamic - // metadata key `access_log_hint` is set to the value `true` under the shared - // key namespace 'envoy.common'. If no policies match, it is set to `false`. - // Other actions do not modify this key. - // - Action action = 1 [(validate.rules).enum = {defined_only: true}]; - - // Maps from policy name to policy. A match occurs when at least one policy matches the request. - // The policies are evaluated in lexicographic order of the policy name. - map policies = 2; -} - -// Policy specifies a role and the principals that are assigned/denied the role. -// A policy matches if and only if at least one of its permissions match the -// action taking place AND at least one of its principals match the downstream -// AND the condition is true if specified. -message Policy { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v3.Policy"; - - // Required. The set of permissions that define a role. Each permission is - // matched with OR semantics. To match all actions for this policy, a single - // Permission with the `any` field set to true should be used. - repeated Permission permissions = 1 [(validate.rules).repeated = {min_items: 1}]; - - // Required. The set of principals that are assigned/denied the role based on - // “action”. Each principal is matched with OR semantics. 
To match all - // downstreams for this policy, a single Principal with the `any` field set to - // true should be used. - repeated Principal principals = 2 [(validate.rules).repeated = {min_items: 1}]; - - oneof expression_specifier { - // An optional symbolic expression specifying an access control - // :ref:`condition `. The condition is combined - // with the permissions and the principals as a clause with AND semantics. - // Only be used when checked_condition is not used. - google.api.expr.v1alpha1.Expr condition = 3; - - // [#not-implemented-hide:] - // An optional symbolic expression that has been successfully type checked. - // Only be used when condition is not used. - google.api.expr.v1alpha1.CheckedExpr checked_condition = 4; - } -} - -// Permission defines an action (or actions) that a principal can take. -// [#next-free-field: 11] -message Permission { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v3.Permission"; - - // Used in the `and_rules` and `or_rules` fields in the `rule` oneof. Depending on the context, - // each are applied with the associated behavior. - message Set { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.rbac.v3.Permission.Set"; - - repeated Permission rules = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - oneof rule { - option (validate.required) = true; - - // A set of rules that all must match in order to define the action. - Set and_rules = 1; - - // A set of rules where at least one must match in order to define the action. - Set or_rules = 2; - - // When any is set, it matches any action. - bool any = 3 [(validate.rules).bool = {const: true}]; - - // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only - // available for HTTP request. - // Note: the pseudo-header :path includes the query and fragment string. Use the `url_path` - // field if you want to match the URL path without the query and fragment string. 
- route.v4alpha.HeaderMatcher header = 4; - - // A URL path on the incoming HTTP request. Only available for HTTP. - type.matcher.v4alpha.PathMatcher url_path = 10; - - // A CIDR block that describes the destination IP. - core.v4alpha.CidrRange destination_ip = 5; - - // A port number that describes the destination port connecting to. - uint32 destination_port = 6 [(validate.rules).uint32 = {lte: 65535}]; - - // Metadata that describes additional information about the action. - type.matcher.v4alpha.MetadataMatcher metadata = 7; - - // Negates matching the provided permission. For instance, if the value of - // `not_rule` would match, this permission would not match. Conversely, if - // the value of `not_rule` would not match, this permission would match. - Permission not_rule = 8; - - // The request server from the client's connection request. This is - // typically TLS SNI. - // - // .. attention:: - // - // The behavior of this field may be affected by how Envoy is configured - // as explained below. - // - // * If the :ref:`TLS Inspector ` - // filter is not added, and if a `FilterChainMatch` is not defined for - // the :ref:`server name - // `, - // a TLS connection's requested SNI server name will be treated as if it - // wasn't present. - // - // * A :ref:`listener filter ` may - // overwrite a connection's requested server name within Envoy. - // - // Please refer to :ref:`this FAQ entry ` to learn to - // setup SNI. - type.matcher.v4alpha.StringMatcher requested_server_name = 9; - } -} - -// Principal defines an identity or a group of identities for a downstream -// subject. -// [#next-free-field: 12] -message Principal { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v3.Principal"; - - // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. - // Depending on the context, each are applied with the associated behavior. 
- message Set { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.rbac.v3.Principal.Set"; - - repeated Principal ids = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - // Authentication attributes for a downstream. - message Authenticated { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.rbac.v3.Principal.Authenticated"; - - reserved 1; - - // The name of the principal. If set, The URI SAN or DNS SAN in that order - // is used from the certificate, otherwise the subject field is used. If - // unset, it applies to any user that is authenticated. - type.matcher.v4alpha.StringMatcher principal_name = 2; - } - - reserved 5; - - reserved "source_ip"; - - oneof identifier { - option (validate.required) = true; - - // A set of identifiers that all must match in order to define the - // downstream. - Set and_ids = 1; - - // A set of identifiers at least one must match in order to define the - // downstream. - Set or_ids = 2; - - // When any is set, it matches any downstream. - bool any = 3 [(validate.rules).bool = {const: true}]; - - // Authenticated attributes that identify the downstream. - Authenticated authenticated = 4; - - // A CIDR block that describes the downstream remote/origin address. - // Note: This is always the physical peer even if the - // :ref:`remote_ip ` is - // inferred from for example the x-forwarder-for header, proxy protocol, - // etc. - core.v4alpha.CidrRange direct_remote_ip = 10; - - // A CIDR block that describes the downstream remote/origin address. - // Note: This may not be the physical peer and could be different from the - // :ref:`direct_remote_ip - // `. E.g, if the - // remote ip is inferred from for example the x-forwarder-for header, proxy - // protocol, etc. - core.v4alpha.CidrRange remote_ip = 11; - - // A header (or pseudo-header such as :path or :method) on the incoming HTTP - // request. Only available for HTTP request. 
Note: the pseudo-header :path - // includes the query and fragment string. Use the `url_path` field if you - // want to match the URL path without the query and fragment string. - route.v4alpha.HeaderMatcher header = 6; - - // A URL path on the incoming HTTP request. Only available for HTTP. - type.matcher.v4alpha.PathMatcher url_path = 9; - - // Metadata that describes additional information about the principal. - type.matcher.v4alpha.MetadataMatcher metadata = 7; - - // Negates matching the provided principal. For instance, if the value of - // `not_id` would match, this principal would not match. Conversely, if the - // value of `not_id` would not match, this principal would match. - Principal not_id = 8; - } -} diff --git a/api/envoy/config/route/v3/route_components.proto b/api/envoy/config/route/v3/route_components.proto index dfb8b8ed1a158..e6be0c43ed0ac 100644 --- a/api/envoy/config/route/v3/route_components.proto +++ b/api/envoy/config/route/v3/route_components.proto @@ -5,6 +5,7 @@ package envoy.config.route.v3; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/extension.proto"; import "envoy/config/core/v3/proxy_protocol.proto"; +import "envoy/type/matcher/v3/metadata.proto"; import "envoy/type/matcher/v3/regex.proto"; import "envoy/type/matcher/v3/string.proto"; import "envoy/type/metadata/v3/metadata.proto"; @@ -311,7 +312,7 @@ message Route { message WeightedCluster { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.WeightedCluster"; - // [#next-free-field: 12] + // [#next-free-field: 13] message ClusterWeight { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.WeightedCluster.ClusterWeight"; @@ -320,9 +321,31 @@ message WeightedCluster { reserved "per_filter_config"; + // Only one of *name* and *cluster_header* may be specified. + // [#next-major-version: Need to add back the validation rule: (validate.rules).string = {min_len: 1}] // Name of the upstream cluster. 
The cluster must exist in the // :ref:`cluster manager configuration `. - string name = 1 [(validate.rules).string = {min_len: 1}]; + string name = 1 [(udpa.annotations.field_migrate).oneof_promotion = "cluster_specifier"]; + + // Only one of *name* and *cluster_header* may be specified. + // [#next-major-version: Need to add back the validation rule: (validate.rules).string = {min_len: 1 }] + // Envoy will determine the cluster to route to by reading the value of the + // HTTP header named by cluster_header from the request headers. If the + // header is not found or the referenced cluster does not exist, Envoy will + // return a 404 response. + // + // .. attention:: + // + // Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 + // *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. + string cluster_header = 12 [ + (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}, + (udpa.annotations.field_migrate).oneof_promotion = "cluster_specifier" + ]; // An integer between 0 and :ref:`total_weight // `. When a request matches the route, @@ -405,7 +428,7 @@ message WeightedCluster { string runtime_key_prefix = 2; } -// [#next-free-field: 13] +// [#next-free-field: 14] message RouteMatch { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteMatch"; @@ -518,6 +541,12 @@ message RouteMatch { // // [#next-major-version: unify with RBAC] TlsContextMatchOptions tls_context = 11; + + // Specifies a set of dynamic metadata matchers on which the route should match. + // The router will check the dynamic metadata against all the specified dynamic metadata matchers. + // If the number of specified dynamic metadata matchers is nonzero, they all must match the + // dynamic metadata for a match to occur. 
+ repeated type.matcher.v3.MetadataMatcher dynamic_metadata = 13; } // [#next-free-field: 12] diff --git a/api/envoy/config/route/v4alpha/BUILD b/api/envoy/config/route/v4alpha/BUILD deleted file mode 100644 index 89fa4149b8795..0000000000000 --- a/api/envoy/config/route/v4alpha/BUILD +++ /dev/null @@ -1,17 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/route/v3:pkg", - "//envoy/type/matcher/v4alpha:pkg", - "//envoy/type/metadata/v3:pkg", - "//envoy/type/tracing/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/config/route/v4alpha/route.proto b/api/envoy/config/route/v4alpha/route.proto deleted file mode 100644 index 4a19386824821..0000000000000 --- a/api/envoy/config/route/v4alpha/route.proto +++ /dev/null @@ -1,146 +0,0 @@ -syntax = "proto3"; - -package envoy.config.route.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/config_source.proto"; -import "envoy/config/core/v4alpha/extension.proto"; -import "envoy/config/route/v4alpha/route_components.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.route.v4alpha"; -option java_outer_classname = "RouteProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: HTTP route configuration] -// * Routing :ref:`architecture overview ` -// * HTTP :ref:`router filter ` - -// [#next-free-field: 13] -message RouteConfiguration { - option (udpa.annotations.versioning).previous_message_type = - 
"envoy.config.route.v3.RouteConfiguration"; - - // The name of the route configuration. For example, it might match - // :ref:`route_config_name - // ` in - // :ref:`envoy_v3_api_msg_extensions.filters.network.http_connection_manager.v3.Rds`. - string name = 1; - - // An array of virtual hosts that make up the route table. - repeated VirtualHost virtual_hosts = 2; - - // An array of virtual hosts will be dynamically loaded via the VHDS API. - // Both *virtual_hosts* and *vhds* fields will be used when present. *virtual_hosts* can be used - // for a base routing table or for infrequently changing virtual hosts. *vhds* is used for - // on-demand discovery of virtual hosts. The contents of these two fields will be merged to - // generate a routing table for a given RouteConfiguration, with *vhds* derived configuration - // taking precedence. - Vhds vhds = 9; - - // Optionally specifies a list of HTTP headers that the connection manager - // will consider to be internal only. If they are found on external requests they will be cleaned - // prior to filter invocation. See :ref:`config_http_conn_man_headers_x-envoy-internal` for more - // information. - repeated string internal_only_headers = 3 [ - (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} - ]; - - // Specifies a list of HTTP headers that should be added to each response that - // the connection manager encodes. Headers specified at this level are applied - // after headers from any enclosed :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` or - // :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. For more information, including details on - // header value syntax, see the documentation on :ref:`custom request headers - // `. 
- repeated core.v4alpha.HeaderValueOption response_headers_to_add = 4 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each response - // that the connection manager encodes. - repeated string response_headers_to_remove = 5 [ - (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} - ]; - - // Specifies a list of HTTP headers that should be added to each request - // routed by the HTTP connection manager. Headers specified at this level are - // applied after headers from any enclosed :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` or - // :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. For more information, including details on - // header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.v4alpha.HeaderValueOption request_headers_to_add = 6 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each request - // routed by the HTTP connection manager. - repeated string request_headers_to_remove = 8 [ - (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} - ]; - - // By default, headers that should be added/removed are evaluated from most to least specific: - // - // * route level - // * virtual host level - // * connection manager level - // - // To allow setting overrides at the route or virtual host level, this order can be reversed - // by setting this option to true. Defaults to false. - // - // [#next-major-version: In the v3 API, this will default to true.] - bool most_specific_header_mutations_wins = 10; - - // An optional boolean that specifies whether the clusters that the route - // table refers to will be validated by the cluster manager. If set to true - // and a route refers to a non-existent cluster, the route table will not - // load. 
If set to false and a route refers to a non-existent cluster, the - // route table will load and the router filter will return a 404 if the route - // is selected at runtime. This setting defaults to true if the route table - // is statically defined via the :ref:`route_config - // ` - // option. This setting default to false if the route table is loaded dynamically via the - // :ref:`rds - // ` - // option. Users may wish to override the default behavior in certain cases (for example when - // using CDS with a static route table). - google.protobuf.BoolValue validate_clusters = 7; - - // The maximum bytes of the response :ref:`direct response body - // ` size. If not specified the default - // is 4096. - // - // .. warning:: - // - // Envoy currently holds the content of :ref:`direct response body - // ` in memory. Be careful setting - // this to be larger than the default 4KB, since the allocated memory for direct response body - // is not subject to data plane buffering controls. - // - google.protobuf.UInt32Value max_direct_response_body_size_bytes = 11; - - // [#not-implemented-hide:] - // A list of plugins and their configurations which may be used by a - // :ref:`envoy_v3_api_field_config.route.v3.RouteAction.cluster_specifier_plugin` - // within the route. All *extension.name* fields in this list must be unique. - repeated ClusterSpecifierPlugin cluster_specifier_plugins = 12; -} - -// Configuration for a cluster specifier plugin. -message ClusterSpecifierPlugin { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.ClusterSpecifierPlugin"; - - // The name of the plugin and its opaque configuration. - core.v4alpha.TypedExtensionConfig extension = 1; -} - -message Vhds { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.Vhds"; - - // Configuration source specifier for VHDS. 
- core.v4alpha.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; -} diff --git a/api/envoy/config/route/v4alpha/route_components.proto b/api/envoy/config/route/v4alpha/route_components.proto deleted file mode 100644 index 9c0cc8f57d351..0000000000000 --- a/api/envoy/config/route/v4alpha/route_components.proto +++ /dev/null @@ -1,1938 +0,0 @@ -syntax = "proto3"; - -package envoy.config.route.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/extension.proto"; -import "envoy/config/core/v4alpha/proxy_protocol.proto"; -import "envoy/type/matcher/v4alpha/regex.proto"; -import "envoy/type/matcher/v4alpha/string.proto"; -import "envoy/type/metadata/v3/metadata.proto"; -import "envoy/type/tracing/v3/custom_tag.proto"; -import "envoy/type/v3/percent.proto"; -import "envoy/type/v3/range.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.route.v4alpha"; -option java_outer_classname = "RouteComponentsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: HTTP route components] -// * Routing :ref:`architecture overview ` -// * HTTP :ref:`router filter ` - -// The top level element in the routing configuration is a virtual host. Each virtual host has -// a logical name as well as a set of domains that get routed to it based on the incoming request's -// host header. This allows a single listener to service multiple top level domain path trees. Once -// a virtual host is selected based on the domain, the routes are processed in order to see which -// upstream cluster to route to or whether to perform a redirect. 
-// [#next-free-field: 21] -message VirtualHost { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.VirtualHost"; - - enum TlsRequirementType { - // No TLS requirement for the virtual host. - NONE = 0; - - // External requests must use TLS. If a request is external and it is not - // using TLS, a 301 redirect will be sent telling the client to use HTTPS. - EXTERNAL_ONLY = 1; - - // All requests must use TLS. If a request is not using TLS, a 301 redirect - // will be sent telling the client to use HTTPS. - ALL = 2; - } - - reserved 9, 12; - - reserved "per_filter_config"; - - // The logical name of the virtual host. This is used when emitting certain - // statistics but is not relevant for routing. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // A list of domains (host/authority header) that will be matched to this - // virtual host. Wildcard hosts are supported in the suffix or prefix form. - // - // Domain search order: - // 1. Exact domain names: ``www.foo.com``. - // 2. Suffix domain wildcards: ``*.foo.com`` or ``*-bar.foo.com``. - // 3. Prefix domain wildcards: ``foo.*`` or ``foo-*``. - // 4. Special wildcard ``*`` matching any domain. - // - // .. note:: - // - // The wildcard will not match the empty string. - // e.g. ``*-bar.foo.com`` will match ``baz-bar.foo.com`` but not ``-bar.foo.com``. - // The longest wildcards match first. - // Only a single virtual host in the entire route configuration can match on ``*``. A domain - // must be unique across all virtual hosts or the config will fail to load. - // - // Domains cannot contain control characters. This is validated by the well_known_regex HTTP_HEADER_VALUE. - repeated string domains = 2 [(validate.rules).repeated = { - min_items: 1 - items {string {well_known_regex: HTTP_HEADER_VALUE strict: false}} - }]; - - // The list of routes that will be matched, in order, for incoming requests. - // The first route that matches will be used. 
- repeated Route routes = 3; - - // Specifies the type of TLS enforcement the virtual host expects. If this option is not - // specified, there is no TLS requirement for the virtual host. - TlsRequirementType require_tls = 4 [(validate.rules).enum = {defined_only: true}]; - - // A list of virtual clusters defined for this virtual host. Virtual clusters - // are used for additional statistics gathering. - repeated VirtualCluster virtual_clusters = 5; - - // Specifies a set of rate limit configurations that will be applied to the - // virtual host. - repeated RateLimit rate_limits = 6; - - // Specifies a list of HTTP headers that should be added to each request - // handled by this virtual host. Headers specified at this level are applied - // after headers from enclosed :ref:`envoy_v3_api_msg_config.route.v3.Route` and before headers from the - // enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including - // details on header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.v4alpha.HeaderValueOption request_headers_to_add = 7 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each request - // handled by this virtual host. - repeated string request_headers_to_remove = 13 [(validate.rules).repeated = { - items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} - }]; - - // Specifies a list of HTTP headers that should be added to each response - // handled by this virtual host. Headers specified at this level are applied - // after headers from enclosed :ref:`envoy_v3_api_msg_config.route.v3.Route` and before headers from the - // enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including - // details on header value syntax, see the documentation on :ref:`custom request headers - // `. 
- repeated core.v4alpha.HeaderValueOption response_headers_to_add = 10 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each response - // handled by this virtual host. - repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { - items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} - }]; - - // Indicates that the virtual host has a CORS policy. - CorsPolicy cors = 8; - - // The per_filter_config field can be used to provide virtual host-specific - // configurations for filters. The key should match the filter name, such as - // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` - // for if and how it is utilized. - // [#comment: An entry's value may be wrapped in a - // :ref:`FilterConfig` - // message to specify additional options.] - map typed_per_filter_config = 15; - - // Decides whether the :ref:`x-envoy-attempt-count - // ` header should be included - // in the upstream request. Setting this option will cause it to override any existing header - // value, so in the case of two Envoys on the request path with this option enabled, the upstream - // will see the attempt count as perceived by the second Envoy. Defaults to false. - // This header is unaffected by the - // :ref:`suppress_envoy_headers - // ` flag. - // - // [#next-major-version: rename to include_attempt_count_in_request.] - bool include_request_attempt_count = 14; - - // Decides whether the :ref:`x-envoy-attempt-count - // ` header should be included - // in the downstream response. Setting this option will cause the router to override any existing header - // value, so in the case of two Envoys on the request path with this option enabled, the downstream - // will see the attempt count as perceived by the Envoy closest upstream from itself. Defaults to false. 
- // This header is unaffected by the - // :ref:`suppress_envoy_headers - // ` flag. - bool include_attempt_count_in_response = 19; - - // Indicates the retry policy for all routes in this virtual host. Note that setting a - // route level entry will take precedence over this config and it'll be treated - // independently (e.g.: values are not inherited). - RetryPolicy retry_policy = 16; - - // [#not-implemented-hide:] - // Specifies the configuration for retry policy extension. Note that setting a route level entry - // will take precedence over this config and it'll be treated independently (e.g.: values are not - // inherited). :ref:`Retry policy ` should not be - // set if this field is used. - google.protobuf.Any retry_policy_typed_config = 20; - - // Indicates the hedge policy for all routes in this virtual host. Note that setting a - // route level entry will take precedence over this config and it'll be treated - // independently (e.g.: values are not inherited). - HedgePolicy hedge_policy = 17; - - // The maximum bytes which will be buffered for retries and shadowing. - // If set and a route-specific limit is not set, the bytes actually buffered will be the minimum - // value of this and the listener per_connection_buffer_limit_bytes. - google.protobuf.UInt32Value per_request_buffer_limit_bytes = 18; -} - -// A filter-defined action type. -message FilterAction { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.FilterAction"; - - google.protobuf.Any action = 1; -} - -// A route is both a specification of how to match a request as well as an indication of what to do -// next (e.g., redirect, forward, rewrite, etc.). -// -// .. attention:: -// -// Envoy supports routing on HTTP method via :ref:`header matching -// `. 
-// [#next-free-field: 19] -message Route { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.Route"; - - reserved 6, 8; - - reserved "per_filter_config"; - - // Name for the route. - string name = 14; - - // Route matching parameters. - RouteMatch match = 1 [(validate.rules).message = {required: true}]; - - oneof action { - option (validate.required) = true; - - // Route request to some upstream cluster. - RouteAction route = 2; - - // Return a redirect. - RedirectAction redirect = 3; - - // Return an arbitrary HTTP response directly, without proxying. - DirectResponseAction direct_response = 7; - - // [#not-implemented-hide:] - // A filter-defined action (e.g., it could dynamically generate the RouteAction). - // [#comment: TODO(samflattery): Remove cleanup in route_fuzz_test.cc when - // implemented] - FilterAction filter_action = 17; - - // [#not-implemented-hide:] - // An action used when the route will generate a response directly, - // without forwarding to an upstream host. This will be used in non-proxy - // xDS clients like the gRPC server. It could also be used in the future - // in Envoy for a filter that directly generates responses for requests. - NonForwardingAction non_forwarding_action = 18; - } - - // The Metadata field can be used to provide additional information - // about the route. It can be used for configuration, stats, and logging. - // The metadata should go under the filter namespace that will need it. - // For instance, if the metadata is intended for the Router filter, - // the filter name should be specified as *envoy.filters.http.router*. - core.v4alpha.Metadata metadata = 4; - - // Decorator for the matched route. - Decorator decorator = 5; - - // The typed_per_filter_config field can be used to provide route-specific - // configurations for filters. The key should match the filter name, such as - // *envoy.filters.http.buffer* for the HTTP buffer filter. 
Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` for - // if and how it is utilized. - // [#comment: An entry's value may be wrapped in a - // :ref:`FilterConfig` - // message to specify additional options.] - map typed_per_filter_config = 13; - - // Specifies a set of headers that will be added to requests matching this - // route. Headers specified at this level are applied before headers from the - // enclosing :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` and - // :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on - // header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.v4alpha.HeaderValueOption request_headers_to_add = 9 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each request - // matching this route. - repeated string request_headers_to_remove = 12 [(validate.rules).repeated = { - items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} - }]; - - // Specifies a set of headers that will be added to responses to requests - // matching this route. Headers specified at this level are applied before - // headers from the enclosing :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` and - // :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including - // details on header value syntax, see the documentation on - // :ref:`custom request headers `. - repeated core.v4alpha.HeaderValueOption response_headers_to_add = 10 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each response - // to requests matching this route. 
- repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { - items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} - }]; - - // Presence of the object defines whether the connection manager's tracing configuration - // is overridden by this route specific instance. - Tracing tracing = 15; - - // The maximum bytes which will be buffered for retries and shadowing. - // If set, the bytes actually buffered will be the minimum value of this and the - // listener per_connection_buffer_limit_bytes. - google.protobuf.UInt32Value per_request_buffer_limit_bytes = 16; -} - -// Compared to the :ref:`cluster ` field that specifies a -// single upstream cluster as the target of a request, the :ref:`weighted_clusters -// ` option allows for specification of -// multiple upstream clusters along with weights that indicate the percentage of -// traffic to be forwarded to each cluster. The router selects an upstream cluster based on the -// weights. -message WeightedCluster { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.WeightedCluster"; - - // [#next-free-field: 12] - message ClusterWeight { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.WeightedCluster.ClusterWeight"; - - reserved 7, 8; - - reserved "per_filter_config"; - - // Name of the upstream cluster. The cluster must exist in the - // :ref:`cluster manager configuration `. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // An integer between 0 and :ref:`total_weight - // `. When a request matches the route, - // the choice of an upstream cluster is determined by its weight. The sum of weights across all - // entries in the clusters array must add up to the total_weight, which defaults to 100. - google.protobuf.UInt32Value weight = 2; - - // Optional endpoint metadata match criteria used by the subset load balancer. 
Only endpoints in - // the upstream cluster with metadata matching what is set in this field will be considered for - // load balancing. Note that this will be merged with what's provided in - // :ref:`RouteAction.metadata_match `, with - // values here taking precedence. The filter name should be specified as *envoy.lb*. - core.v4alpha.Metadata metadata_match = 3; - - // Specifies a list of headers to be added to requests when this cluster is selected - // through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. - // Headers specified at this level are applied before headers from the enclosing - // :ref:`envoy_v3_api_msg_config.route.v3.Route`, :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`, and - // :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on - // header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.v4alpha.HeaderValueOption request_headers_to_add = 4 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each request when - // this cluster is selected through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. - repeated string request_headers_to_remove = 9 [(validate.rules).repeated = { - items {string {well_known_regex: HTTP_HEADER_NAME strict: false}} - }]; - - // Specifies a list of headers to be added to responses when this cluster is selected - // through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. - // Headers specified at this level are applied before headers from the enclosing - // :ref:`envoy_v3_api_msg_config.route.v3.Route`, :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`, and - // :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on - // header value syntax, see the documentation on :ref:`custom request headers - // `. 
- repeated core.v4alpha.HeaderValueOption response_headers_to_add = 5 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of headers to be removed from responses when this cluster is selected - // through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. - repeated string response_headers_to_remove = 6 [(validate.rules).repeated = { - items {string {well_known_regex: HTTP_HEADER_NAME strict: false}} - }]; - - // The per_filter_config field can be used to provide weighted cluster-specific - // configurations for filters. The key should match the filter name, such as - // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` - // for if and how it is utilized. - // [#comment: An entry's value may be wrapped in a - // :ref:`FilterConfig` - // message to specify additional options.] - map typed_per_filter_config = 10; - - oneof host_rewrite_specifier { - // Indicates that during forwarding, the host header will be swapped with - // this value. - string host_rewrite_literal = 11 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - } - } - - // Specifies one or more upstream clusters associated with the route. - repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; - - // Specifies the total weight across all clusters. The sum of all cluster weights must equal this - // value, which must be greater than 0. Defaults to 100. - google.protobuf.UInt32Value total_weight = 3 [(validate.rules).uint32 = {gte: 1}]; - - // Specifies the runtime key prefix that should be used to construct the - // runtime keys associated with each cluster. When the *runtime_key_prefix* is - // specified, the router will look for weights associated with each upstream - // cluster under the key *runtime_key_prefix* + "." + *cluster[i].name* where - // *cluster[i]* denotes an entry in the clusters array field. 
If the runtime - // key for the cluster does not exist, the value specified in the - // configuration file will be used as the default weight. See the :ref:`runtime documentation - // ` for how key names map to the underlying implementation. - string runtime_key_prefix = 2; -} - -// [#next-free-field: 13] -message RouteMatch { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RouteMatch"; - - message GrpcRouteMatchOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RouteMatch.GrpcRouteMatchOptions"; - } - - message TlsContextMatchOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RouteMatch.TlsContextMatchOptions"; - - // If specified, the route will match against whether or not a certificate is presented. - // If not specified, certificate presentation status (true or false) will not be considered when route matching. - google.protobuf.BoolValue presented = 1; - - // If specified, the route will match against whether or not a certificate is validated. - // If not specified, certificate validation status (true or false) will not be considered when route matching. - google.protobuf.BoolValue validated = 2; - } - - // An extensible message for matching CONNECT requests. - message ConnectMatcher { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RouteMatch.ConnectMatcher"; - } - - reserved 5, 3; - - reserved "regex"; - - oneof path_specifier { - option (validate.required) = true; - - // If specified, the route is a prefix rule meaning that the prefix must - // match the beginning of the *:path* header. - string prefix = 1; - - // If specified, the route is an exact path rule meaning that the path must - // exactly match the *:path* header once the query string is removed. 
- string path = 2; - - // If specified, the route is a regular expression rule meaning that the - // regex must match the *:path* header once the query string is removed. The entire path - // (without the query string) must match the regex. The rule will not match if only a - // subsequence of the *:path* header matches the regex. - // - // [#next-major-version: In the v3 API we should redo how path specification works such - // that we utilize StringMatcher, and additionally have consistent options around whether we - // strip query strings, do a case sensitive match, etc. In the interim it will be too disruptive - // to deprecate the existing options. We should even consider whether we want to do away with - // path_specifier entirely and just rely on a set of header matchers which can already match - // on :path, etc. The issue with that is it is unclear how to generically deal with query string - // stripping. This needs more thought.] - type.matcher.v4alpha.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; - - // If this is used as the matcher, the matcher will only match CONNECT requests. - // Note that this will not match HTTP/2 upgrade-style CONNECT requests - // (WebSocket and the like) as they are normalized in Envoy as HTTP/1.1 style - // upgrades. - // This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2, - // where Extended CONNECT requests may have a path, the path matchers will work if - // there is a path present. - // Note that CONNECT support is currently considered alpha in Envoy. - // [#comment: TODO(htuch): Replace the above comment with an alpha tag.] - ConnectMatcher connect_matcher = 12; - } - - // Indicates that prefix/path matching should be case sensitive. The default - // is true. Ignored for safe_regex matching. - google.protobuf.BoolValue case_sensitive = 4; - - // Indicates that the route should additionally match on a runtime key. 
Every time the route - // is considered for a match, it must also fall under the percentage of matches indicated by - // this field. For some fraction N/D, a random number in the range [0,D) is selected. If the - // number is <= the value of the numerator N, or if the key is not present, the default - // value, the router continues to evaluate the remaining match criteria. A runtime_fraction - // route configuration can be used to roll out route changes in a gradual manner without full - // code/config deploys. Refer to the :ref:`traffic shifting - // ` docs for additional documentation. - // - // .. note:: - // - // Parsing this field is implemented such that the runtime key's data may be represented - // as a FractionalPercent proto represented as JSON/YAML and may also be represented as an - // integer with the assumption that the value is an integral percentage out of 100. For - // instance, a runtime key lookup returning the value "42" would parse as a FractionalPercent - // whose numerator is 42 and denominator is HUNDRED. This preserves legacy semantics. - core.v4alpha.RuntimeFractionalPercent runtime_fraction = 9; - - // Specifies a set of headers that the route should match on. The router will - // check the request’s headers against all the specified headers in the route - // config. A match will happen if all the headers in the route are present in - // the request with the same values (or based on presence if the value field - // is not in the config). - repeated HeaderMatcher headers = 6; - - // Specifies a set of URL query parameters on which the route should - // match. The router will check the query string from the *path* header - // against all the specified query parameters. If the number of specified - // query parameters is nonzero, they all must match the *path* header's - // query string for a match to occur. - repeated QueryParameterMatcher query_parameters = 7; - - // If specified, only gRPC requests will be matched. 
The router will check - // that the content-type header has a application/grpc or one of the various - // application/grpc+ values. - GrpcRouteMatchOptions grpc = 8; - - // If specified, the client tls context will be matched against the defined - // match options. - // - // [#next-major-version: unify with RBAC] - TlsContextMatchOptions tls_context = 11; -} - -// [#next-free-field: 12] -message CorsPolicy { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.CorsPolicy"; - - reserved 1, 8, 7; - - reserved "allow_origin", "allow_origin_regex", "enabled"; - - // Specifies string patterns that match allowed origins. An origin is allowed if any of the - // string matchers match. - repeated type.matcher.v4alpha.StringMatcher allow_origin_string_match = 11; - - // Specifies the content for the *access-control-allow-methods* header. - string allow_methods = 2; - - // Specifies the content for the *access-control-allow-headers* header. - string allow_headers = 3; - - // Specifies the content for the *access-control-expose-headers* header. - string expose_headers = 4; - - // Specifies the content for the *access-control-max-age* header. - string max_age = 5; - - // Specifies whether the resource allows credentials. - google.protobuf.BoolValue allow_credentials = 6; - - oneof enabled_specifier { - // Specifies the % of requests for which the CORS filter is enabled. - // - // If neither ``enabled``, ``filter_enabled``, nor ``shadow_enabled`` are specified, the CORS - // filter will be enabled for 100% of the requests. - // - // If :ref:`runtime_key ` is - // specified, Envoy will lookup the runtime key to get the percentage of requests to filter. - core.v4alpha.RuntimeFractionalPercent filter_enabled = 9; - } - - // Specifies the % of requests for which the CORS policies will be evaluated and tracked, but not - // enforced. - // - // This field is intended to be used when ``filter_enabled`` and ``enabled`` are off. 
One of those - // fields have to explicitly disable the filter in order for this setting to take effect. - // - // If :ref:`runtime_key ` is specified, - // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate - // and track the request's *Origin* to determine if it's valid but will not enforce any policies. - core.v4alpha.RuntimeFractionalPercent shadow_enabled = 10; -} - -// [#next-free-field: 38] -message RouteAction { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RouteAction"; - - enum ClusterNotFoundResponseCode { - // HTTP status code - 503 Service Unavailable. - SERVICE_UNAVAILABLE = 0; - - // HTTP status code - 404 Not Found. - NOT_FOUND = 1; - } - - // The router is capable of shadowing traffic from one cluster to another. The current - // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to - // respond before returning the response from the primary cluster. All normal statistics are - // collected for the shadow cluster making this feature useful for testing. - // - // During shadowing, the host/authority header is altered such that *-shadow* is appended. This is - // useful for logging. For example, *cluster1* becomes *cluster1-shadow*. - // - // .. note:: - // - // Shadowing will not be triggered if the primary cluster does not exist. - message RequestMirrorPolicy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RouteAction.RequestMirrorPolicy"; - - reserved 2; - - reserved "runtime_key"; - - // Specifies the cluster that requests will be mirrored to. The cluster must - // exist in the cluster manager configuration. - string cluster = 1 [(validate.rules).string = {min_len: 1}]; - - // If not specified, all requests to the target cluster will be mirrored. 
- // - // If specified, this field takes precedence over the `runtime_key` field and requests must also - // fall under the percentage of matches indicated by this field. - // - // For some fraction N/D, a random number in the range [0,D) is selected. If the - // number is <= the value of the numerator N, or if the key is not present, the default - // value, the request will be mirrored. - core.v4alpha.RuntimeFractionalPercent runtime_fraction = 3; - - // Determines if the trace span should be sampled. Defaults to true. - google.protobuf.BoolValue trace_sampled = 4; - } - - // Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer - // `. - // [#next-free-field: 7] - message HashPolicy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RouteAction.HashPolicy"; - - message Header { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RouteAction.HashPolicy.Header"; - - // The name of the request header that will be used to obtain the hash - // key. If the request header is not present, no hash will be produced. - string header_name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // If specified, the request header value will be rewritten and used - // to produce the hash key. - type.matcher.v4alpha.RegexMatchAndSubstitute regex_rewrite = 2; - } - - // Envoy supports two types of cookie affinity: - // - // 1. Passive. Envoy takes a cookie that's present in the cookies header and - // hashes on its value. - // - // 2. Generated. Envoy generates and sets a cookie with an expiration (TTL) - // on the first request from the client in its response to the client, - // based on the endpoint the request gets sent to. The client then - // presents this on the next and all subsequent requests. The hash of - // this is sufficient to ensure these requests get sent to the same - // endpoint. 
The cookie is generated by hashing the source and - // destination ports and addresses so that multiple independent HTTP2 - // streams on the same connection will independently receive the same - // cookie, even if they arrive at the Envoy simultaneously. - message Cookie { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RouteAction.HashPolicy.Cookie"; - - // The name of the cookie that will be used to obtain the hash key. If the - // cookie is not present and ttl below is not set, no hash will be - // produced. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // If specified, a cookie with the TTL will be generated if the cookie is - // not present. If the TTL is present and zero, the generated cookie will - // be a session cookie. - google.protobuf.Duration ttl = 2; - - // The name of the path for the cookie. If no path is specified here, no path - // will be set for the cookie. - string path = 3; - } - - message ConnectionProperties { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RouteAction.HashPolicy.ConnectionProperties"; - - // Hash on source IP address. - bool source_ip = 1; - } - - message QueryParameter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RouteAction.HashPolicy.QueryParameter"; - - // The name of the URL query parameter that will be used to obtain the hash - // key. If the parameter is not present, no hash will be produced. Query - // parameter names are case-sensitive. - string name = 1 [(validate.rules).string = {min_len: 1}]; - } - - message FilterState { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RouteAction.HashPolicy.FilterState"; - - // The name of the Object in the per-request filterState, which is an - // Envoy::Http::Hashable object. 
If there is no data associated with the key, - // or the stored object is not Envoy::Http::Hashable, no hash will be produced. - string key = 1 [(validate.rules).string = {min_len: 1}]; - } - - oneof policy_specifier { - option (validate.required) = true; - - // Header hash policy. - Header header = 1; - - // Cookie hash policy. - Cookie cookie = 2; - - // Connection properties hash policy. - ConnectionProperties connection_properties = 3; - - // Query parameter hash policy. - QueryParameter query_parameter = 5; - - // Filter state hash policy. - FilterState filter_state = 6; - } - - // The flag that short-circuits the hash computing. This field provides a - // 'fallback' style of configuration: "if a terminal policy doesn't work, - // fallback to rest of the policy list", it saves time when the terminal - // policy works. - // - // If true, and there is already a hash computed, ignore rest of the - // list of hash polices. - // For example, if the following hash methods are configured: - // - // ========= ======== - // specifier terminal - // ========= ======== - // Header A true - // Header B false - // Header C false - // ========= ======== - // - // The generateHash process ends if policy "header A" generates a hash, as - // it's a terminal policy. - bool terminal = 4; - } - - // Allows enabling and disabling upgrades on a per-route basis. - // This overrides any enabled/disabled upgrade filter chain specified in the - // HttpConnectionManager - // :ref:`upgrade_configs - // ` - // but does not affect any custom filter chain specified there. - message UpgradeConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RouteAction.UpgradeConfig"; - - // Configuration for sending data upstream as a raw data payload. This is used for - // CONNECT or POST requests, when forwarding request payload as raw TCP. 
- message ConnectConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RouteAction.UpgradeConfig.ConnectConfig"; - - // If present, the proxy protocol header will be prepended to the CONNECT payload sent upstream. - core.v4alpha.ProxyProtocolConfig proxy_protocol_config = 1; - - // If set, the route will also allow forwarding POST payload as raw TCP. - bool allow_post = 2; - } - - // The case-insensitive name of this upgrade, e.g. "websocket". - // For each upgrade type present in upgrade_configs, requests with - // Upgrade: [upgrade_type] will be proxied upstream. - string upgrade_type = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Determines if upgrades are available on this route. Defaults to true. - google.protobuf.BoolValue enabled = 2; - - // Configuration for sending data upstream as a raw data payload. This is used for - // CONNECT requests, when forwarding CONNECT payload as raw TCP. - // Note that CONNECT support is currently considered alpha in Envoy. - // [#comment: TODO(htuch): Replace the above comment with an alpha tag.] - ConnectConfig connect_config = 3; - } - - message MaxStreamDuration { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RouteAction.MaxStreamDuration"; - - // Specifies the maximum duration allowed for streams on the route. If not specified, the value - // from the :ref:`max_stream_duration - // ` field in - // :ref:`HttpConnectionManager.common_http_protocol_options - // ` - // is used. If this field is set explicitly to zero, any - // HttpConnectionManager max_stream_duration timeout will be disabled for - // this route. - google.protobuf.Duration max_stream_duration = 1; - - // If present, and the request contains a `grpc-timeout header - // `_, use that value as the - // *max_stream_duration*, but limit the applied timeout to the maximum value specified here. 
- // If set to 0, the `grpc-timeout` header is used without modification. - google.protobuf.Duration grpc_timeout_header_max = 2; - - // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by - // subtracting the provided duration from the header. This is useful for allowing Envoy to set - // its global timeout to be less than that of the deadline imposed by the calling client, which - // makes it more likely that Envoy will handle the timeout instead of having the call canceled - // by the client. If, after applying the offset, the resulting timeout is zero or negative, - // the stream will timeout immediately. - google.protobuf.Duration grpc_timeout_header_offset = 3; - } - - reserved 12, 18, 19, 16, 22, 21, 10, 14, 23, 28, 26, 31; - - reserved "request_mirror_policy", "include_vh_rate_limits", "max_grpc_timeout", - "grpc_timeout_offset", "internal_redirect_action", "max_internal_redirects"; - - oneof cluster_specifier { - option (validate.required) = true; - - // Indicates the upstream cluster to which the request should be routed - // to. - string cluster = 1 [(validate.rules).string = {min_len: 1}]; - - // Envoy will determine the cluster to route to by reading the value of the - // HTTP header named by cluster_header from the request headers. If the - // header is not found or the referenced cluster does not exist, Envoy will - // return a 404 response. - // - // .. attention:: - // - // Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 - // *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. - string cluster_header = 2 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // Multiple upstream clusters can be specified for a given route. 
The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. See - // :ref:`traffic splitting ` - // for additional documentation. - WeightedCluster weighted_clusters = 3; - - // [#not-implemented-hide:] - // Name of the cluster specifier plugin to use to determine the cluster for - // requests on this route. The plugin name must be defined in the associated - // :ref:`envoy_v3_api_field_config.route.v3.RouteConfiguration.cluster_specifier_plugins` - // in the - // :ref:`envoy_v3_api_field_config.core.v3.TypedExtensionConfig.name` field. - string cluster_specifier_plugin = 37; - } - - // The HTTP status code to use when configured cluster is not found. - // The default response code is 503 Service Unavailable. - ClusterNotFoundResponseCode cluster_not_found_response_code = 20 - [(validate.rules).enum = {defined_only: true}]; - - // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints - // in the upstream cluster with metadata matching what's set in this field will be considered - // for load balancing. If using :ref:`weighted_clusters - // `, metadata will be merged, with values - // provided there taking precedence. The filter name should be specified as *envoy.lb*. - core.v4alpha.Metadata metadata_match = 4; - - // Indicates that during forwarding, the matched prefix (or path) should be - // swapped with this value. This option allows application URLs to be rooted - // at a different path from those exposed at the reverse proxy layer. The router filter will - // place the original path before rewrite into the :ref:`x-envoy-original-path - // ` header. - // - // Only one of *prefix_rewrite* or - // :ref:`regex_rewrite ` - // may be specified. - // - // .. attention:: - // - // Pay careful attention to the use of trailing slashes in the - // :ref:`route's match ` prefix value. - // Stripping a prefix from a path requires multiple Routes to handle all cases. 
For example, - // rewriting */prefix* to */* and */prefix/etc* to */etc* cannot be done in a single - // :ref:`Route `, as shown by the below config entries: - // - // .. code-block:: yaml - // - // - match: - // prefix: "/prefix/" - // route: - // prefix_rewrite: "/" - // - match: - // prefix: "/prefix" - // route: - // prefix_rewrite: "/" - // - // Having above entries in the config, requests to */prefix* will be stripped to */*, while - // requests to */prefix/etc* will be stripped to */etc*. - string prefix_rewrite = 5 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Indicates that during forwarding, portions of the path that match the - // pattern should be rewritten, even allowing the substitution of capture - // groups from the pattern into the new path as specified by the rewrite - // substitution string. This is useful to allow application paths to be - // rewritten in a way that is aware of segments with variable content like - // identifiers. The router filter will place the original path as it was - // before the rewrite into the :ref:`x-envoy-original-path - // ` header. - // - // Only one of :ref:`prefix_rewrite ` - // or *regex_rewrite* may be specified. - // - // Examples using Google's `RE2 `_ engine: - // - // * The path pattern ``^/service/([^/]+)(/.*)$`` paired with a substitution - // string of ``\2/instance/\1`` would transform ``/service/foo/v1/api`` - // into ``/v1/api/instance/foo``. - // - // * The pattern ``one`` paired with a substitution string of ``two`` would - // transform ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/two/zzz``. - // - // * The pattern ``^(.*?)one(.*)$`` paired with a substitution string of - // ``\1two\2`` would replace only the first occurrence of ``one``, - // transforming path ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/one/zzz``. 
- // - // * The pattern ``(?i)/xxx/`` paired with a substitution string of ``/yyy/`` - // would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to - // ``/aaa/yyy/bbb``. - type.matcher.v4alpha.RegexMatchAndSubstitute regex_rewrite = 32; - - oneof host_rewrite_specifier { - // Indicates that during forwarding, the host header will be swapped with - // this value. - string host_rewrite_literal = 6 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Indicates that during forwarding, the host header will be swapped with - // the hostname of the upstream host chosen by the cluster manager. This - // option is applicable only when the destination cluster for a route is of - // type *strict_dns* or *logical_dns*. Setting this to true with other cluster - // types has no effect. - google.protobuf.BoolValue auto_host_rewrite = 7; - - // Indicates that during forwarding, the host header will be swapped with the content of given - // downstream or :ref:`custom ` header. - // If header value is empty, host header is left intact. - // - // .. attention:: - // - // Pay attention to the potential security implications of using this option. Provided header - // must come from trusted source. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. - string host_rewrite_header = 29 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // Indicates that during forwarding, the host header will be swapped with - // the result of the regex substitution executed on path value with query and fragment removed. - // This is useful for transitioning variable content between path segment and subdomain. - // - // For example with the following config: - // - // .. 
code-block:: yaml - // - // host_rewrite_path_regex: - // pattern: - // google_re2: {} - // regex: "^/(.+)/.+$" - // substitution: \1 - // - // Would rewrite the host header to `envoyproxy.io` given the path `/envoyproxy.io/some/path`. - type.matcher.v4alpha.RegexMatchAndSubstitute host_rewrite_path_regex = 35; - } - - // Specifies the upstream timeout for the route. If not specified, the default is 15s. This - // spans between the point at which the entire downstream request (i.e. end-of-stream) has been - // processed and when the upstream response has been completely processed. A value of 0 will - // disable the route's timeout. - // - // .. note:: - // - // This timeout includes all retries. See also - // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, - // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the - // :ref:`retry overview `. - google.protobuf.Duration timeout = 8; - - // Specifies the idle timeout for the route. If not specified, there is no per-route idle timeout, - // although the connection manager wide :ref:`stream_idle_timeout - // ` - // will still apply. A value of 0 will completely disable the route's idle timeout, even if a - // connection manager stream idle timeout is configured. - // - // The idle timeout is distinct to :ref:`timeout - // `, which provides an upper bound - // on the upstream response time; :ref:`idle_timeout - // ` instead bounds the amount - // of time the request's stream may be idle. - // - // After header decoding, the idle timeout will apply on downstream and - // upstream request events. Each time an encode/decode event for headers or - // data is processed for the stream, the timer will be reset. If the timeout - // fires, the stream is terminated with a 408 Request Timeout error code if no - // upstream response header has been received, otherwise a stream reset - // occurs. 
- // - // If the :ref:`overload action ` "envoy.overload_actions.reduce_timeouts" - // is configured, this timeout is scaled according to the value for - // :ref:`HTTP_DOWNSTREAM_STREAM_IDLE `. - google.protobuf.Duration idle_timeout = 24; - - // Indicates that the route has a retry policy. Note that if this is set, - // it'll take precedence over the virtual host level retry policy entirely - // (e.g.: policies are not merged, most internal one becomes the enforced policy). - RetryPolicy retry_policy = 9; - - // [#not-implemented-hide:] - // Specifies the configuration for retry policy extension. Note that if this is set, it'll take - // precedence over the virtual host level retry policy entirely (e.g.: policies are not merged, - // most internal one becomes the enforced policy). :ref:`Retry policy ` - // should not be set if this field is used. - google.protobuf.Any retry_policy_typed_config = 33; - - // Indicates that the route has request mirroring policies. - repeated RequestMirrorPolicy request_mirror_policies = 30; - - // Optionally specifies the :ref:`routing priority `. - core.v4alpha.RoutingPriority priority = 11 [(validate.rules).enum = {defined_only: true}]; - - // Specifies a set of rate limit configurations that could be applied to the - // route. - repeated RateLimit rate_limits = 13; - - // Specifies a list of hash policies to use for ring hash load balancing. Each - // hash policy is evaluated individually and the combined result is used to - // route the request. The method of combination is deterministic such that - // identical lists of hash policies will produce the same hash. Since a hash - // policy examines specific parts of a request, it can fail to produce a hash - // (i.e. if the hashed header is not present). If (and only if) all configured - // hash policies fail to generate a hash, no hash will be produced for - // the route. In this case, the behavior is the same as if no hash policies - // were specified (i.e. 
the ring hash load balancer will choose a random - // backend). If a hash policy has the "terminal" attribute set to true, and - // there is already a hash generated, the hash is returned immediately, - // ignoring the rest of the hash policy list. - repeated HashPolicy hash_policy = 15; - - // Indicates that the route has a CORS policy. - CorsPolicy cors = 17; - - repeated UpgradeConfig upgrade_configs = 25; - - // If present, Envoy will try to follow an upstream redirect response instead of proxying the - // response back to the downstream. An upstream redirect response is defined - // by :ref:`redirect_response_codes - // `. - InternalRedirectPolicy internal_redirect_policy = 34; - - // Indicates that the route has a hedge policy. Note that if this is set, - // it'll take precedence over the virtual host level hedge policy entirely - // (e.g.: policies are not merged, most internal one becomes the enforced policy). - HedgePolicy hedge_policy = 27; - - // Specifies the maximum stream duration for this route. - MaxStreamDuration max_stream_duration = 36; -} - -// HTTP retry :ref:`architecture overview `. 
-// [#next-free-field: 12] -message RetryPolicy { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RetryPolicy"; - - enum ResetHeaderFormat { - SECONDS = 0; - UNIX_TIMESTAMP = 1; - } - - message RetryPriority { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RetryPolicy.RetryPriority"; - - reserved 2; - - reserved "config"; - - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // [#extension-category: envoy.retry_priorities] - oneof config_type { - google.protobuf.Any typed_config = 3; - } - } - - message RetryHostPredicate { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RetryPolicy.RetryHostPredicate"; - - reserved 2; - - reserved "config"; - - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // [#extension-category: envoy.retry_host_predicates] - oneof config_type { - google.protobuf.Any typed_config = 3; - } - } - - message RetryBackOff { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RetryPolicy.RetryBackOff"; - - // Specifies the base interval between retries. This parameter is required and must be greater - // than zero. Values less than 1 ms are rounded up to 1 ms. - // See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's - // back-off algorithm. - google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { - required: true - gt {} - }]; - - // Specifies the maximum interval between retries. This parameter is optional, but must be - // greater than or equal to the `base_interval` if set. The default is 10 times the - // `base_interval`. See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion - // of Envoy's back-off algorithm. 
- google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}]; - } - - message ResetHeader { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RetryPolicy.ResetHeader"; - - // The name of the reset header. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. - string name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // The format of the reset header. - ResetHeaderFormat format = 2 [(validate.rules).enum = {defined_only: true}]; - } - - // A retry back-off strategy that applies when the upstream server rate limits - // the request. - // - // Given this configuration: - // - // .. code-block:: yaml - // - // rate_limited_retry_back_off: - // reset_headers: - // - name: Retry-After - // format: SECONDS - // - name: X-RateLimit-Reset - // format: UNIX_TIMESTAMP - // max_interval: "300s" - // - // The following algorithm will apply: - // - // 1. If the response contains the header ``Retry-After`` its value must be on - // the form ``120`` (an integer that represents the number of seconds to - // wait before retrying). If so, this value is used as the back-off interval. - // 2. Otherwise, if the response contains the header ``X-RateLimit-Reset`` its - // value must be on the form ``1595320702`` (an integer that represents the - // point in time at which to retry, as a Unix timestamp in seconds). If so, - // the current time is subtracted from this value and the result is used as - // the back-off interval. - // 3. Otherwise, Envoy will use the default - // :ref:`exponential back-off ` - // strategy. - // - // No matter which format is used, if the resulting back-off interval exceeds - // ``max_interval`` it is discarded and the next header in ``reset_headers`` - // is tried. If a request timeout is configured for the route it will further - // limit how long the request will be allowed to run. 
- // - // To prevent many clients retrying at the same point in time jitter is added - // to the back-off interval, so the resulting interval is decided by taking: - // ``random(interval, interval * 1.5)``. - // - // .. attention:: - // - // Configuring ``rate_limited_retry_back_off`` will not by itself cause a request - // to be retried. You will still need to configure the right retry policy to match - // the responses from the upstream server. - message RateLimitedRetryBackOff { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RetryPolicy.RateLimitedRetryBackOff"; - - // Specifies the reset headers (like ``Retry-After`` or ``X-RateLimit-Reset``) - // to match against the response. Headers are tried in order, and matched case - // insensitive. The first header to be parsed successfully is used. If no headers - // match the default exponential back-off is used instead. - repeated ResetHeader reset_headers = 1 [(validate.rules).repeated = {min_items: 1}]; - - // Specifies the maximum back off interval that Envoy will allow. If a reset - // header contains an interval longer than this then it will be discarded and - // the next header will be tried. Defaults to 300 seconds. - google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}]; - } - - // Specifies the conditions under which retry takes place. These are the same - // conditions documented for :ref:`config_http_filters_router_x-envoy-retry-on` and - // :ref:`config_http_filters_router_x-envoy-retry-grpc-on`. - string retry_on = 1; - - // Specifies the allowed number of retries. This parameter is optional and - // defaults to 1. These are the same conditions documented for - // :ref:`config_http_filters_router_x-envoy-max-retries`. - google.protobuf.UInt32Value max_retries = 2; - - // Specifies a non-zero upstream timeout per retry attempt. This parameter is optional. 
The - // same conditions documented for - // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` apply. - // - // .. note:: - // - // If left unspecified, Envoy will use the global - // :ref:`route timeout ` for the request. - // Consequently, when using a :ref:`5xx ` based - // retry policy, a request that times out will not be retried as the total timeout budget - // would have been exhausted. - google.protobuf.Duration per_try_timeout = 3; - - // Specifies an implementation of a RetryPriority which is used to determine the - // distribution of load across priorities used for retries. Refer to - // :ref:`retry plugin configuration ` for more details. - RetryPriority retry_priority = 4; - - // Specifies a collection of RetryHostPredicates that will be consulted when selecting a host - // for retries. If any of the predicates reject the host, host selection will be reattempted. - // Refer to :ref:`retry plugin configuration ` for more - // details. - repeated RetryHostPredicate retry_host_predicate = 5; - - // The maximum number of times host selection will be reattempted before giving up, at which - // point the host that was last selected will be routed to. If unspecified, this will default to - // retrying once. - int64 host_selection_retry_max_attempts = 6; - - // HTTP status codes that should trigger a retry in addition to those specified by retry_on. - repeated uint32 retriable_status_codes = 7; - - // Specifies parameters that control exponential retry back off. This parameter is optional, in which case the - // default base interval is 25 milliseconds or, if set, the current value of the - // `upstream.base_retry_backoff_ms` runtime parameter. The default maximum interval is 10 times - // the base interval. The documentation for :ref:`config_http_filters_router_x-envoy-max-retries` - // describes Envoy's back-off algorithm. 
- RetryBackOff retry_back_off = 8; - - // Specifies parameters that control a retry back-off strategy that is used - // when the request is rate limited by the upstream server. The server may - // return a response header like ``Retry-After`` or ``X-RateLimit-Reset`` to - // provide feedback to the client on how long to wait before retrying. If - // configured, this back-off strategy will be used instead of the - // default exponential back off strategy (configured using `retry_back_off`) - // whenever a response includes the matching headers. - RateLimitedRetryBackOff rate_limited_retry_back_off = 11; - - // HTTP response headers that trigger a retry if present in the response. A retry will be - // triggered if any of the header matches match the upstream response headers. - // The field is only consulted if 'retriable-headers' retry policy is active. - repeated HeaderMatcher retriable_headers = 9; - - // HTTP headers which must be present in the request for retries to be attempted. - repeated HeaderMatcher retriable_request_headers = 10; -} - -// HTTP request hedging :ref:`architecture overview `. -message HedgePolicy { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.HedgePolicy"; - - // Specifies the number of initial requests that should be sent upstream. - // Must be at least 1. - // Defaults to 1. - // [#not-implemented-hide:] - google.protobuf.UInt32Value initial_requests = 1 [(validate.rules).uint32 = {gte: 1}]; - - // Specifies a probability that an additional upstream request should be sent - // on top of what is specified by initial_requests. - // Defaults to 0. - // [#not-implemented-hide:] - type.v3.FractionalPercent additional_request_chance = 2; - - // Indicates that a hedged request should be sent when the per-try timeout is hit. - // This means that a retry will be issued without resetting the original request, leaving multiple upstream requests in flight. 
- // The first request to complete successfully will be the one returned to the caller. - // - // * At any time, a successful response (i.e. not triggering any of the retry-on conditions) would be returned to the client. - // * Before per-try timeout, an error response (per retry-on conditions) would be retried immediately or returned ot the client - // if there are no more retries left. - // * After per-try timeout, an error response would be discarded, as a retry in the form of a hedged request is already in progress. - // - // Note: For this to have effect, you must have a :ref:`RetryPolicy ` that retries at least - // one error code and specifies a maximum number of retries. - // - // Defaults to false. - bool hedge_on_per_try_timeout = 3; -} - -// [#next-free-field: 10] -message RedirectAction { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RedirectAction"; - - enum RedirectResponseCode { - // Moved Permanently HTTP Status Code - 301. - MOVED_PERMANENTLY = 0; - - // Found HTTP Status Code - 302. - FOUND = 1; - - // See Other HTTP Status Code - 303. - SEE_OTHER = 2; - - // Temporary Redirect HTTP Status Code - 307. - TEMPORARY_REDIRECT = 3; - - // Permanent Redirect HTTP Status Code - 308. - PERMANENT_REDIRECT = 4; - } - - // When the scheme redirection take place, the following rules apply: - // 1. If the source URI scheme is `http` and the port is explicitly - // set to `:80`, the port will be removed after the redirection - // 2. If the source URI scheme is `https` and the port is explicitly - // set to `:443`, the port will be removed after the redirection - oneof scheme_rewrite_specifier { - // The scheme portion of the URL will be swapped with "https". - bool https_redirect = 4; - - // The scheme portion of the URL will be swapped with this value. - string scheme_redirect = 7; - } - - // The host portion of the URL will be swapped with this value. 
- string host_redirect = 1 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // The port value of the URL will be swapped with this value. - uint32 port_redirect = 8; - - oneof path_rewrite_specifier { - // The path portion of the URL will be swapped with this value. - // Please note that query string in path_redirect will override the - // request's query string and will not be stripped. - // - // For example, let's say we have the following routes: - // - // - match: { path: "/old-path-1" } - // redirect: { path_redirect: "/new-path-1" } - // - match: { path: "/old-path-2" } - // redirect: { path_redirect: "/new-path-2", strip-query: "true" } - // - match: { path: "/old-path-3" } - // redirect: { path_redirect: "/new-path-3?foo=1", strip_query: "true" } - // - // 1. if request uri is "/old-path-1?bar=1", users will be redirected to "/new-path-1?bar=1" - // 2. if request uri is "/old-path-2?bar=1", users will be redirected to "/new-path-2" - // 3. if request uri is "/old-path-3?bar=1", users will be redirected to "/new-path-3?foo=1" - string path_redirect = 2 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Indicates that during redirection, the matched prefix (or path) - // should be swapped with this value. This option allows redirect URLs be dynamically created - // based on the request. - // - // .. attention:: - // - // Pay attention to the use of trailing slashes as mentioned in - // :ref:`RouteAction's prefix_rewrite `. - string prefix_rewrite = 5 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Indicates that during redirect, portions of the path that match the - // pattern should be rewritten, even allowing the substitution of capture - // groups from the pattern into the new path as specified by the rewrite - // substitution string. 
This is useful to allow application paths to be - // rewritten in a way that is aware of segments with variable content like - // identifiers. - // - // Examples using Google's `RE2 `_ engine: - // - // * The path pattern ``^/service/([^/]+)(/.*)$`` paired with a substitution - // string of ``\2/instance/\1`` would transform ``/service/foo/v1/api`` - // into ``/v1/api/instance/foo``. - // - // * The pattern ``one`` paired with a substitution string of ``two`` would - // transform ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/two/zzz``. - // - // * The pattern ``^(.*?)one(.*)$`` paired with a substitution string of - // ``\1two\2`` would replace only the first occurrence of ``one``, - // transforming path ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/one/zzz``. - // - // * The pattern ``(?i)/xxx/`` paired with a substitution string of ``/yyy/`` - // would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to - // ``/aaa/yyy/bbb``. - type.matcher.v4alpha.RegexMatchAndSubstitute regex_rewrite = 9; - } - - // The HTTP status code to use in the redirect response. The default response - // code is MOVED_PERMANENTLY (301). - RedirectResponseCode response_code = 3 [(validate.rules).enum = {defined_only: true}]; - - // Indicates that during redirection, the query portion of the URL will - // be removed. Default value is false. - bool strip_query = 6; -} - -message DirectResponseAction { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.DirectResponseAction"; - - // Specifies the HTTP response status to be returned. - uint32 status = 1 [(validate.rules).uint32 = {lt: 600 gte: 100}]; - - // Specifies the content of the response body. If this setting is omitted, - // no body is included in the generated response. - // - // .. 
note:: - // - // Headers can be specified using *response_headers_to_add* in the enclosing - // :ref:`envoy_v3_api_msg_config.route.v3.Route`, :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` or - // :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`. - core.v4alpha.DataSource body = 2; -} - -// [#not-implemented-hide:] -message NonForwardingAction { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.NonForwardingAction"; -} - -message Decorator { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.Decorator"; - - // The operation name associated with the request matched to this route. If tracing is - // enabled, this information will be used as the span name reported for this request. - // - // .. note:: - // - // For ingress (inbound) requests, or egress (outbound) responses, this value may be overridden - // by the :ref:`x-envoy-decorator-operation - // ` header. - string operation = 1 [(validate.rules).string = {min_len: 1}]; - - // Whether the decorated details should be propagated to the other party. The default is true. - google.protobuf.BoolValue propagate = 2; -} - -message Tracing { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.Tracing"; - - // Target percentage of requests managed by this HTTP connection manager that will be force - // traced if the :ref:`x-client-trace-id ` - // header is set. This field is a direct analog for the runtime variable - // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager - // `. - // Default: 100% - type.v3.FractionalPercent client_sampling = 1; - - // Target percentage of requests managed by this HTTP connection manager that will be randomly - // selected for trace generation, if not requested by the client or not forced. This field is - // a direct analog for the runtime variable 'tracing.random_sampling' in the - // :ref:`HTTP Connection Manager `. 
- // Default: 100% - type.v3.FractionalPercent random_sampling = 2; - - // Target percentage of requests managed by this HTTP connection manager that will be traced - // after all other sampling checks have been applied (client-directed, force tracing, random - // sampling). This field functions as an upper limit on the total configured sampling rate. For - // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1% - // of client requests with the appropriate headers to be force traced. This field is a direct - // analog for the runtime variable 'tracing.global_enabled' in the - // :ref:`HTTP Connection Manager `. - // Default: 100% - type.v3.FractionalPercent overall_sampling = 3; - - // A list of custom tags with unique tag name to create tags for the active span. - // It will take effect after merging with the :ref:`corresponding configuration - // ` - // configured in the HTTP connection manager. If two tags with the same name are configured - // each in the HTTP connection manager and the route level, the one configured here takes - // priority. - repeated type.tracing.v3.CustomTag custom_tags = 4; -} - -// A virtual cluster is a way of specifying a regex matching rule against -// certain important endpoints such that statistics are generated explicitly for -// the matched requests. The reason this is useful is that when doing -// prefix/path matching Envoy does not always know what the application -// considers to be an endpoint. Thus, it’s impossible for Envoy to generically -// emit per endpoint statistics. However, often systems have highly critical -// endpoints that they wish to get “perfect” statistics on. Virtual cluster -// statistics are perfect in the sense that they are emitted on the downstream -// side such that they include network level failures. -// -// Documentation for :ref:`virtual cluster statistics `. -// -// .. 
note:: -// -// Virtual clusters are a useful tool, but we do not recommend setting up a virtual cluster for -// every application endpoint. This is both not easily maintainable and as well the matching and -// statistics output are not free. -message VirtualCluster { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.VirtualCluster"; - - reserved 1, 3; - - reserved "pattern", "method"; - - // Specifies a list of header matchers to use for matching requests. Each specified header must - // match. The pseudo-headers `:path` and `:method` can be used to match the request path and - // method, respectively. - repeated HeaderMatcher headers = 4; - - // Specifies the name of the virtual cluster. The virtual cluster name as well - // as the virtual host name are used when emitting statistics. The statistics are emitted by the - // router filter and are documented :ref:`here `. - string name = 2 [(validate.rules).string = {min_len: 1}]; -} - -// Global rate limiting :ref:`architecture overview `. -// Also applies to Local rate limiting :ref:`using descriptors `. -message RateLimit { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RateLimit"; - - // [#next-free-field: 10] - message Action { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RateLimit.Action"; - - // The following descriptor entry is appended to the descriptor: - // - // .. code-block:: cpp - // - // ("source_cluster", "") - // - // is derived from the :option:`--service-cluster` option. - message SourceCluster { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RateLimit.Action.SourceCluster"; - } - - // The following descriptor entry is appended to the descriptor: - // - // .. 
code-block:: cpp - // - // ("destination_cluster", "") - // - // Once a request matches against a route table rule, a routed cluster is determined by one of - // the following :ref:`route table configuration ` - // settings: - // - // * :ref:`cluster ` indicates the upstream cluster - // to route to. - // * :ref:`weighted_clusters ` - // chooses a cluster randomly from a set of clusters with attributed weight. - // * :ref:`cluster_header ` indicates which - // header in the request contains the target cluster. - message DestinationCluster { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RateLimit.Action.DestinationCluster"; - } - - // The following descriptor entry is appended when a header contains a key that matches the - // *header_name*: - // - // .. code-block:: cpp - // - // ("", "") - message RequestHeaders { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RateLimit.Action.RequestHeaders"; - - // The header name to be queried from the request headers. The header’s - // value is used to populate the value of the descriptor entry for the - // descriptor_key. - string header_name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // The key to use in the descriptor entry. - string descriptor_key = 2 [(validate.rules).string = {min_len: 1}]; - - // If set to true, Envoy skips the descriptor while calling rate limiting service - // when header is not present in the request. By default it skips calling the - // rate limiting service if this header is not present in the request. - bool skip_if_absent = 3; - } - - // The following descriptor entry is appended to the descriptor and is populated using the - // trusted address from :ref:`x-forwarded-for `: - // - // .. 
code-block:: cpp - // - // ("remote_address", "") - message RemoteAddress { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RateLimit.Action.RemoteAddress"; - } - - // The following descriptor entry is appended to the descriptor: - // - // .. code-block:: cpp - // - // ("generic_key", "") - message GenericKey { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RateLimit.Action.GenericKey"; - - // The value to use in the descriptor entry. - string descriptor_value = 1 [(validate.rules).string = {min_len: 1}]; - - // An optional key to use in the descriptor entry. If not set it defaults - // to 'generic_key' as the descriptor key. - string descriptor_key = 2; - } - - // The following descriptor entry is appended to the descriptor: - // - // .. code-block:: cpp - // - // ("header_match", "") - message HeaderValueMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RateLimit.Action.HeaderValueMatch"; - - // The value to use in the descriptor entry. - string descriptor_value = 1 [(validate.rules).string = {min_len: 1}]; - - // If set to true, the action will append a descriptor entry when the - // request matches the headers. If set to false, the action will append a - // descriptor entry when the request does not match the headers. The - // default value is true. - google.protobuf.BoolValue expect_match = 2; - - // Specifies a set of headers that the rate limit action should match - // on. The action will check the request’s headers against all the - // specified headers in the config. A match will happen if all the - // headers in the config are present in the request with the same values - // (or based on presence if the value field is not in the config). 
- repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}]; - } - - // The following descriptor entry is appended when the - // :ref:`dynamic metadata ` contains a key value: - // - // .. code-block:: cpp - // - // ("", "") - // - // .. attention:: - // This action has been deprecated in favor of the :ref:`metadata ` action - message DynamicMetaData { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RateLimit.Action.DynamicMetaData"; - - // The key to use in the descriptor entry. - string descriptor_key = 1 [(validate.rules).string = {min_len: 1}]; - - // Metadata struct that defines the key and path to retrieve the string value. A match will - // only happen if the value in the dynamic metadata is of type string. - type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}]; - - // An optional value to use if *metadata_key* is empty. If not set and - // no value is present under the metadata_key then no descriptor is generated. - string default_value = 3; - } - - // The following descriptor entry is appended when the metadata contains a key value: - // - // .. code-block:: cpp - // - // ("", "") - message MetaData { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RateLimit.Action.MetaData"; - - enum Source { - // Query :ref:`dynamic metadata ` - DYNAMIC = 0; - - // Query :ref:`route entry metadata ` - ROUTE_ENTRY = 1; - } - - // The key to use in the descriptor entry. - string descriptor_key = 1 [(validate.rules).string = {min_len: 1}]; - - // Metadata struct that defines the key and path to retrieve the string value. A match will - // only happen if the value in the metadata is of type string. - type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}]; - - // An optional value to use if *metadata_key* is empty. 
If not set and - // no value is present under the metadata_key then no descriptor is generated. - string default_value = 3; - - // Source of metadata - Source source = 4 [(validate.rules).enum = {defined_only: true}]; - } - - reserved 7; - - reserved "dynamic_metadata"; - - oneof action_specifier { - option (validate.required) = true; - - // Rate limit on source cluster. - SourceCluster source_cluster = 1; - - // Rate limit on destination cluster. - DestinationCluster destination_cluster = 2; - - // Rate limit on request headers. - RequestHeaders request_headers = 3; - - // Rate limit on remote address. - RemoteAddress remote_address = 4; - - // Rate limit on a generic key. - GenericKey generic_key = 5; - - // Rate limit on the existence of request headers. - HeaderValueMatch header_value_match = 6; - - // Rate limit on metadata. - MetaData metadata = 8; - - // Rate limit descriptor extension. See the rate limit descriptor extensions documentation. - // [#extension-category: envoy.rate_limit_descriptors] - core.v4alpha.TypedExtensionConfig extension = 9; - } - } - - message Override { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RateLimit.Override"; - - // Fetches the override from the dynamic metadata. - message DynamicMetadata { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RateLimit.Override.DynamicMetadata"; - - // Metadata struct that defines the key and path to retrieve the struct value. - // The value must be a struct containing an integer "requests_per_unit" property - // and a "unit" property with a value parseable to :ref:`RateLimitUnit - // enum ` - type.metadata.v3.MetadataKey metadata_key = 1 [(validate.rules).message = {required: true}]; - } - - oneof override_specifier { - option (validate.required) = true; - - // Limit override from dynamic metadata. - DynamicMetadata dynamic_metadata = 1; - } - } - - // Refers to the stage set in the filter. 
The rate limit configuration only - // applies to filters with the same stage number. The default stage number is - // 0. - // - // .. note:: - // - // The filter supports a range of 0 - 10 inclusively for stage numbers. - google.protobuf.UInt32Value stage = 1 [(validate.rules).uint32 = {lte: 10}]; - - // The key to be set in runtime to disable this rate limit configuration. - string disable_key = 2; - - // A list of actions that are to be applied for this rate limit configuration. - // Order matters as the actions are processed sequentially and the descriptor - // is composed by appending descriptor entries in that sequence. If an action - // cannot append a descriptor entry, no descriptor is generated for the - // configuration. See :ref:`composing actions - // ` for additional documentation. - repeated Action actions = 3 [(validate.rules).repeated = {min_items: 1}]; - - // An optional limit override to be appended to the descriptor produced by this - // rate limit configuration. If the override value is invalid or cannot be resolved - // from metadata, no override is provided. See :ref:`rate limit override - // ` for more information. - Override limit = 4; -} - -// .. attention:: -// -// Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 *Host* -// header. Thus, if attempting to match on *Host*, match on *:authority* instead. -// -// .. attention:: -// -// To route on HTTP method, use the special HTTP/2 *:method* header. This works for both -// HTTP/1 and HTTP/2 as Envoy normalizes headers. E.g., -// -// .. code-block:: json -// -// { -// "name": ":method", -// "exact_match": "POST" -// } -// -// .. attention:: -// In the absence of any header match specifier, match will default to :ref:`present_match -// `. i.e, a request that has the :ref:`name -// ` header will match, regardless of the header's -// value. -// -// [#next-major-version: HeaderMatcher should be refactored to use StringMatcher.] 
-// [#next-free-field: 14] -message HeaderMatcher { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.HeaderMatcher"; - - reserved 2, 3, 5, 4, 11, 9, 10, 12; - - reserved "regex_match", "exact_match", "safe_regex_match", "prefix_match", "suffix_match", - "contains_match"; - - // Specifies the name of the header in the request. - string name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // Specifies how the header match will be performed to route the request. - oneof header_match_specifier { - // If specified, header match will be performed based on range. - // The rule will match if the request header value is within this range. - // The entire request header value must represent an integer in base 10 notation: consisting of - // an optional plus or minus sign followed by a sequence of digits. The rule will not match if - // the header value does not represent an integer. Match will fail for empty values, floating - // point numbers or if only a subsequence of the header value is an integer. - // - // Examples: - // - // * For range [-10,0), route will match for header value -1, but not for 0, "somestring", 10.9, - // "-1somestring" - type.v3.Int64Range range_match = 6; - - // If specified as true, header match will be performed based on whether the header is in the - // request. If specified as false, header match will be performed based on whether the header is absent. - bool present_match = 7; - - // If specified, header match will be performed based on the string match of the header value. - type.matcher.v4alpha.StringMatcher string_match = 13; - } - - // If specified, the match result will be inverted before checking. Defaults to false. - // - // Examples: - // - // * The regex ``\d{3}`` does not match the value *1234*, so it will match when inverted. - // * The range [-10,0) will match the value -1, so it will not match when inverted. 
- bool invert_match = 8; -} - -// Query parameter matching treats the query string of a request's :path header -// as an ampersand-separated list of keys and/or key=value elements. -// [#next-free-field: 7] -message QueryParameterMatcher { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.QueryParameterMatcher"; - - reserved 3, 4; - - reserved "value", "regex"; - - // Specifies the name of a key that must be present in the requested - // *path*'s query string. - string name = 1 [(validate.rules).string = {min_len: 1 max_bytes: 1024}]; - - oneof query_parameter_match_specifier { - // Specifies whether a query parameter value should match against a string. - type.matcher.v4alpha.StringMatcher string_match = 5 - [(validate.rules).message = {required: true}]; - - // Specifies whether a query parameter should be present. - bool present_match = 6; - } -} - -// HTTP Internal Redirect :ref:`architecture overview `. -message InternalRedirectPolicy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.InternalRedirectPolicy"; - - // An internal redirect is not handled, unless the number of previous internal redirects that a - // downstream request has encountered is lower than this value. - // In the case where a downstream request is bounced among multiple routes by internal redirect, - // the first route that hits this threshold, or does not set :ref:`internal_redirect_policy - // ` - // will pass the redirect back to downstream. - // - // If not specified, at most one redirect will be followed. - google.protobuf.UInt32Value max_internal_redirects = 1; - - // Defines what upstream response codes are allowed to trigger internal redirect. If unspecified, - // only 302 will be treated as internal redirect. - // Only 301, 302, 303, 307 and 308 are valid values. Any other codes will be ignored. 
- repeated uint32 redirect_response_codes = 2 [(validate.rules).repeated = {max_items: 5}]; - - // Specifies a list of predicates that are queried when an upstream response is deemed - // to trigger an internal redirect by all other criteria. Any predicate in the list can reject - // the redirect, causing the response to be proxied to downstream. - // [#extension-category: envoy.internal_redirect_predicates] - repeated core.v4alpha.TypedExtensionConfig predicates = 3; - - // Allow internal redirect to follow a target URI with a different scheme than the value of - // x-forwarded-proto. The default is false. - bool allow_cross_scheme_redirect = 4; -} - -// A simple wrapper for an HTTP filter config. This is intended to be used as a wrapper for the -// map value in -// :ref:`VirtualHost.typed_per_filter_config`, -// :ref:`Route.typed_per_filter_config`, -// or :ref:`WeightedCluster.ClusterWeight.typed_per_filter_config` -// to add additional flags to the filter. -// [#not-implemented-hide:] -message FilterConfig { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.FilterConfig"; - - // The filter config. - google.protobuf.Any config = 1; - - // If true, the filter is optional, meaning that if the client does - // not support the specified filter, it may ignore the map entry rather - // than rejecting the config. 
- bool is_optional = 2; -} diff --git a/api/envoy/config/route/v4alpha/scoped_route.proto b/api/envoy/config/route/v4alpha/scoped_route.proto deleted file mode 100644 index 4c640223f701c..0000000000000 --- a/api/envoy/config/route/v4alpha/scoped_route.proto +++ /dev/null @@ -1,120 +0,0 @@ -syntax = "proto3"; - -package envoy.config.route.v4alpha; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.route.v4alpha"; -option java_outer_classname = "ScopedRouteProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: HTTP scoped routing configuration] -// * Routing :ref:`architecture overview ` - -// Specifies a routing scope, which associates a -// :ref:`Key` to a -// :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` (identified by its resource name). -// -// The HTTP connection manager builds up a table consisting of these Key to -// RouteConfiguration mappings, and looks up the RouteConfiguration to use per -// request according to the algorithm specified in the -// :ref:`scope_key_builder` -// assigned to the HttpConnectionManager. -// -// For example, with the following configurations (in YAML): -// -// HttpConnectionManager config: -// -// .. code:: -// -// ... -// scoped_routes: -// name: foo-scoped-routes -// scope_key_builder: -// fragments: -// - header_value_extractor: -// name: X-Route-Selector -// element_separator: , -// element: -// separator: = -// key: vip -// -// ScopedRouteConfiguration resources (specified statically via -// :ref:`scoped_route_configurations_list` -// or obtained dynamically via SRDS): -// -// .. 
code:: -// -// (1) -// name: route-scope1 -// route_configuration_name: route-config1 -// key: -// fragments: -// - string_key: 172.10.10.20 -// -// (2) -// name: route-scope2 -// route_configuration_name: route-config2 -// key: -// fragments: -// - string_key: 172.20.20.30 -// -// A request from a client such as: -// -// .. code:: -// -// GET / HTTP/1.1 -// Host: foo.com -// X-Route-Selector: vip=172.10.10.20 -// -// would result in the routing table defined by the `route-config1` -// RouteConfiguration being assigned to the HTTP request/stream. -// -message ScopedRouteConfiguration { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.ScopedRouteConfiguration"; - - // Specifies a key which is matched against the output of the - // :ref:`scope_key_builder` - // specified in the HttpConnectionManager. The matching is done per HTTP - // request and is dependent on the order of the fragments contained in the - // Key. - message Key { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.ScopedRouteConfiguration.Key"; - - message Fragment { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.ScopedRouteConfiguration.Key.Fragment"; - - oneof type { - option (validate.required) = true; - - // A string to match against. - string string_key = 1; - } - } - - // The ordered set of fragments to match against. The order must match the - // fragments in the corresponding - // :ref:`scope_key_builder`. - repeated Fragment fragments = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - // Whether the RouteConfiguration should be loaded on demand. - bool on_demand = 4; - - // The name assigned to the routing scope. 
- string name = 1 [(validate.rules).string = {min_len: 1}]; - - // The resource name to use for a :ref:`envoy_v3_api_msg_service.discovery.v3.DiscoveryRequest` to an - // RDS server to fetch the :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` associated - // with this scope. - string route_configuration_name = 2 [(validate.rules).string = {min_len: 1}]; - - // The key to match against. - Key key = 3 [(validate.rules).message = {required: true}]; -} diff --git a/api/envoy/config/tap/v4alpha/BUILD b/api/envoy/config/tap/v4alpha/BUILD deleted file mode 100644 index f226f8b207e49..0000000000000 --- a/api/envoy/config/tap/v4alpha/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/common/matcher/v4alpha:pkg", - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/route/v4alpha:pkg", - "//envoy/config/tap/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/config/tap/v4alpha/common.proto b/api/envoy/config/tap/v4alpha/common.proto deleted file mode 100644 index a425329be4e9b..0000000000000 --- a/api/envoy/config/tap/v4alpha/common.proto +++ /dev/null @@ -1,276 +0,0 @@ -syntax = "proto3"; - -package envoy.config.tap.v4alpha; - -import "envoy/config/common/matcher/v4alpha/matcher.proto"; -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/grpc_service.proto"; -import "envoy/config/route/v4alpha/route_components.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.tap.v4alpha"; -option java_outer_classname = "CommonProto"; -option java_multiple_files = true; -option 
(udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Common tap configuration] - -// Tap configuration. -message TapConfig { - // [#comment:TODO(mattklein123): Rate limiting] - - option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.TapConfig"; - - reserved 1; - - reserved "match_config"; - - // The match configuration. If the configuration matches the data source being tapped, a tap will - // occur, with the result written to the configured output. - // Exactly one of :ref:`match ` and - // :ref:`match_config ` must be set. If both - // are set, the :ref:`match ` will be used. - common.matcher.v4alpha.MatchPredicate match = 4; - - // The tap output configuration. If a match configuration matches a data source being tapped, - // a tap will occur and the data will be written to the configured output. - OutputConfig output_config = 2 [(validate.rules).message = {required: true}]; - - // [#not-implemented-hide:] Specify if Tap matching is enabled. The % of requests\connections for - // which the tap matching is enabled. When not enabled, the request\connection will not be - // recorded. - // - // .. note:: - // - // This field defaults to 100/:ref:`HUNDRED - // `. - core.v4alpha.RuntimeFractionalPercent tap_enabled = 3; -} - -// Tap match configuration. This is a recursive structure which allows complex nested match -// configurations to be built using various logical operators. -// [#next-free-field: 11] -message MatchPredicate { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.MatchPredicate"; - - // A set of match configurations used for logical operations. - message MatchSet { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.tap.v3.MatchPredicate.MatchSet"; - - // The list of rules that make up the set. 
- repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}]; - } - - oneof rule { - option (validate.required) = true; - - // A set that describes a logical OR. If any member of the set matches, the match configuration - // matches. - MatchSet or_match = 1; - - // A set that describes a logical AND. If all members of the set match, the match configuration - // matches. - MatchSet and_match = 2; - - // A negation match. The match configuration will match if the negated match condition matches. - MatchPredicate not_match = 3; - - // The match configuration will always match. - bool any_match = 4 [(validate.rules).bool = {const: true}]; - - // HTTP request headers match configuration. - HttpHeadersMatch http_request_headers_match = 5; - - // HTTP request trailers match configuration. - HttpHeadersMatch http_request_trailers_match = 6; - - // HTTP response headers match configuration. - HttpHeadersMatch http_response_headers_match = 7; - - // HTTP response trailers match configuration. - HttpHeadersMatch http_response_trailers_match = 8; - - // HTTP request generic body match configuration. - HttpGenericBodyMatch http_request_generic_body_match = 9; - - // HTTP response generic body match configuration. - HttpGenericBodyMatch http_response_generic_body_match = 10; - } -} - -// HTTP headers match configuration. -message HttpHeadersMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.tap.v3.HttpHeadersMatch"; - - // HTTP headers to match. - repeated route.v4alpha.HeaderMatcher headers = 1; -} - -// HTTP generic body match configuration. -// List of text strings and hex strings to be located in HTTP body. -// All specified strings must be found in the HTTP body for positive match. -// The search may be limited to specified number of bytes from the body start. -// -// .. attention:: -// -// Searching for patterns in HTTP body is potentially cpu intensive. 
For each specified pattern, http body is scanned byte by byte to find a match. -// If multiple patterns are specified, the process is repeated for each pattern. If location of a pattern is known, ``bytes_limit`` should be specified -// to scan only part of the http body. -message HttpGenericBodyMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.tap.v3.HttpGenericBodyMatch"; - - message GenericTextMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.tap.v3.HttpGenericBodyMatch.GenericTextMatch"; - - oneof rule { - option (validate.required) = true; - - // Text string to be located in HTTP body. - string string_match = 1 [(validate.rules).string = {min_len: 1}]; - - // Sequence of bytes to be located in HTTP body. - bytes binary_match = 2 [(validate.rules).bytes = {min_len: 1}]; - } - } - - // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer). - uint32 bytes_limit = 1; - - // List of patterns to match. - repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}]; -} - -// Tap output configuration. -message OutputConfig { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.OutputConfig"; - - // Output sinks for tap data. Currently a single sink is allowed in the list. Once multiple - // sink types are supported this constraint will be relaxed. - repeated OutputSink sinks = 1 [(validate.rules).repeated = {min_items: 1 max_items: 1}]; - - // For buffered tapping, the maximum amount of received body that will be buffered prior to - // truncation. If truncation occurs, the :ref:`truncated - // ` field will be set. If not specified, the - // default is 1KiB. - google.protobuf.UInt32Value max_buffered_rx_bytes = 2; - - // For buffered tapping, the maximum amount of transmitted body that will be buffered prior to - // truncation. If truncation occurs, the :ref:`truncated - // ` field will be set. 
If not specified, the - // default is 1KiB. - google.protobuf.UInt32Value max_buffered_tx_bytes = 3; - - // Indicates whether taps produce a single buffered message per tap, or multiple streamed - // messages per tap in the emitted :ref:`TraceWrapper - // ` messages. Note that streamed tapping does not - // mean that no buffering takes place. Buffering may be required if data is processed before a - // match can be determined. See the HTTP tap filter :ref:`streaming - // ` documentation for more information. - bool streaming = 4; -} - -// Tap output sink configuration. -message OutputSink { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.OutputSink"; - - // Output format. All output is in the form of one or more :ref:`TraceWrapper - // ` messages. This enumeration indicates - // how those messages are written. Note that not all sinks support all output formats. See - // individual sink documentation for more information. - enum Format { - // Each message will be written as JSON. Any :ref:`body ` - // data will be present in the :ref:`as_bytes - // ` field. This means that body data will be - // base64 encoded as per the `proto3 JSON mappings - // `_. - JSON_BODY_AS_BYTES = 0; - - // Each message will be written as JSON. Any :ref:`body ` - // data will be present in the :ref:`as_string - // ` field. This means that body data will be - // string encoded as per the `proto3 JSON mappings - // `_. This format type is - // useful when it is known that that body is human readable (e.g., JSON over HTTP) and the - // user wishes to view it directly without being forced to base64 decode the body. - JSON_BODY_AS_STRING = 1; - - // Binary proto format. Note that binary proto is not self-delimiting. If a sink writes - // multiple binary messages without any length information the data stream will not be - // useful. 
However, for certain sinks that are self-delimiting (e.g., one message per file) - // this output format makes consumption simpler. - PROTO_BINARY = 2; - - // Messages are written as a sequence tuples, where each tuple is the message length encoded - // as a `protobuf 32-bit varint - // `_ - // followed by the binary message. The messages can be read back using the language specific - // protobuf coded stream implementation to obtain the message length and the message. - PROTO_BINARY_LENGTH_DELIMITED = 3; - - // Text proto format. - PROTO_TEXT = 4; - } - - // Sink output format. - Format format = 1 [(validate.rules).enum = {defined_only: true}]; - - oneof output_sink_type { - option (validate.required) = true; - - // Tap output will be streamed out the :http:post:`/tap` admin endpoint. - // - // .. attention:: - // - // It is only allowed to specify the streaming admin output sink if the tap is being - // configured from the :http:post:`/tap` admin endpoint. Thus, if an extension has - // been configured to receive tap configuration from some other source (e.g., static - // file, XDS, etc.) configuring the streaming admin output type will fail. - StreamingAdminSink streaming_admin = 2; - - // Tap output will be written to a file per tap sink. - FilePerTapSink file_per_tap = 3; - - // [#not-implemented-hide:] - // GrpcService to stream data to. The format argument must be PROTO_BINARY. - // [#comment: TODO(samflattery): remove cleanup in uber_per_filter.cc once implemented] - StreamingGrpcSink streaming_grpc = 4; - } -} - -// Streaming admin sink configuration. -message StreamingAdminSink { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.tap.v3.StreamingAdminSink"; -} - -// The file per tap sink outputs a discrete file for every tapped stream. -message FilePerTapSink { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.FilePerTapSink"; - - // Path prefix. 
The output file will be of the form _.pb, where is an - // identifier distinguishing the recorded trace for stream instances (the Envoy - // connection ID, HTTP stream ID, etc.). - string path_prefix = 1 [(validate.rules).string = {min_len: 1}]; -} - -// [#not-implemented-hide:] Streaming gRPC sink configuration sends the taps to an external gRPC -// server. -message StreamingGrpcSink { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.tap.v3.StreamingGrpcSink"; - - // Opaque identifier, that will be sent back to the streaming grpc server. - string tap_id = 1; - - // The gRPC server that hosts the Tap Sink Service. - core.v4alpha.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; -} diff --git a/api/envoy/config/trace/v3/zipkin.proto b/api/envoy/config/trace/v3/zipkin.proto index 2c1026b8304a7..0638d89315faf 100644 --- a/api/envoy/config/trace/v3/zipkin.proto +++ b/api/envoy/config/trace/v3/zipkin.proto @@ -50,8 +50,7 @@ message ZipkinConfig { string collector_cluster = 1 [(validate.rules).string = {min_len: 1}]; // The API endpoint of the Zipkin service where the spans will be sent. When - // using a standard Zipkin installation, the API endpoint is typically - // /api/v1/spans, which is the default value. + // using a standard Zipkin installation. string collector_endpoint = 2 [(validate.rules).string = {min_len: 1}]; // Determines whether a 128bit trace id will be used when creating a new @@ -62,8 +61,7 @@ message ZipkinConfig { // The default value is true. google.protobuf.BoolValue shared_span_context = 4; - // Determines the selected collector endpoint version. By default, the ``HTTP_JSON_V1`` will be - // used. + // Determines the selected collector endpoint version. CollectorEndpointVersion collector_endpoint_version = 5; // Optional hostname to use when sending spans to the collector_cluster. 
Useful for collectors diff --git a/api/envoy/config/trace/v4alpha/BUILD b/api/envoy/config/trace/v4alpha/BUILD deleted file mode 100644 index 1d56979cc4660..0000000000000 --- a/api/envoy/config/trace/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/trace/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/config/trace/v4alpha/http_tracer.proto b/api/envoy/config/trace/v4alpha/http_tracer.proto deleted file mode 100644 index 33c8e73d56b9d..0000000000000 --- a/api/envoy/config/trace/v4alpha/http_tracer.proto +++ /dev/null @@ -1,59 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v4alpha; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v4alpha"; -option java_outer_classname = "HttpTracerProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Tracing] -// Tracing :ref:`architecture overview `. - -// The tracing configuration specifies settings for an HTTP tracer provider used by Envoy. -// -// Envoy may support other tracers in the future, but right now the HTTP tracer is the only one -// supported. -// -// .. attention:: -// -// Use of this message type has been deprecated in favor of direct use of -// :ref:`Tracing.Http `. -message Tracing { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v3.Tracing"; - - // Configuration for an HTTP tracer provider used by Envoy. 
- // - // The configuration is defined by the - // :ref:`HttpConnectionManager.Tracing ` - // :ref:`provider ` - // field. - message Http { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.trace.v3.Tracing.Http"; - - reserved 2; - - reserved "config"; - - // The name of the HTTP trace driver to instantiate. The name must match a - // supported HTTP trace driver. - // See the :ref:`extensions listed in typed_config below ` for the default list of the HTTP trace driver. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Trace driver specific configuration which must be set according to the driver being instantiated. - // [#extension-category: envoy.tracers] - oneof config_type { - google.protobuf.Any typed_config = 3; - } - } - - // Provides configuration for the HTTP tracer. - Http http = 1; -} diff --git a/api/envoy/config/trace/v4alpha/service.proto b/api/envoy/config/trace/v4alpha/service.proto deleted file mode 100644 index d132b32dd79d4..0000000000000 --- a/api/envoy/config/trace/v4alpha/service.proto +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v4alpha; - -import "envoy/config/core/v4alpha/grpc_service.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v4alpha"; -option java_outer_classname = "ServiceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Trace Service] - -// Configuration structure. -message TraceServiceConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.trace.v3.TraceServiceConfig"; - - // The upstream gRPC cluster that hosts the metrics service. 
- core.v4alpha.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; -} diff --git a/api/envoy/data/dns/v4alpha/dns_table.proto b/api/envoy/data/dns/v4alpha/dns_table.proto deleted file mode 100644 index 4f8626edece91..0000000000000 --- a/api/envoy/data/dns/v4alpha/dns_table.proto +++ /dev/null @@ -1,159 +0,0 @@ -syntax = "proto3"; - -package envoy.data.dns.v4alpha; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.data.dns.v4alpha"; -option java_outer_classname = "DnsTableProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: DNS Filter Table Data] -// :ref:`DNS Filter config overview `. - -// This message contains the configuration for the DNS Filter if populated -// from the control plane -message DnsTable { - option (udpa.annotations.versioning).previous_message_type = "envoy.data.dns.v3.DnsTable"; - - // This message contains a list of IP addresses returned for a query for a known name - message AddressList { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.dns.v3.DnsTable.AddressList"; - - // This field contains a well formed IP address that is returned in the answer for a - // name query. The address field can be an IPv4 or IPv6 address. Address family - // detection is done automatically when Envoy parses the string. 
Since this field is - // repeated, Envoy will return as many entries from this list in the DNS response while - // keeping the response under 512 bytes - repeated string address = 1 [(validate.rules).repeated = { - min_items: 1 - items {string {min_len: 3}} - }]; - } - - // Specify the service protocol using a numeric or string value - message DnsServiceProtocol { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.dns.v3.DnsTable.DnsServiceProtocol"; - - oneof protocol_config { - option (validate.required) = true; - - // Specify the protocol number for the service. Envoy will try to resolve the number to - // the protocol name. For example, 6 will resolve to "tcp". Refer to: - // https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml - // for protocol names and numbers - uint32 number = 1 [(validate.rules).uint32 = {lt: 255}]; - - // Specify the protocol name for the service. - string name = 2 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; - } - } - - // Specify the target for a given DNS service - // [#next-free-field: 6] - message DnsServiceTarget { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.dns.v3.DnsTable.DnsServiceTarget"; - - // Specify the name of the endpoint for the Service. The name is a hostname or a cluster - oneof endpoint_type { - option (validate.required) = true; - - // Use a resolvable hostname as the endpoint for a service. - string host_name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; - - // Use a cluster name as the endpoint for a service. 
- string cluster_name = 2 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; - } - - // The priority of the service record target - uint32 priority = 3 [(validate.rules).uint32 = {lt: 65536}]; - - // The weight of the service record target - uint32 weight = 4 [(validate.rules).uint32 = {lt: 65536}]; - - // The port to which the service is bound. This value is optional if the target is a - // cluster. Setting port to zero in this case makes the filter use the port value - // from the cluster host - uint32 port = 5 [(validate.rules).uint32 = {lt: 65536}]; - } - - // This message defines a service selection record returned for a service query in a domain - message DnsService { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.dns.v3.DnsTable.DnsService"; - - // The name of the service without the protocol or domain name - string service_name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; - - // The service protocol. This can be specified as a string or the numeric value of the protocol - DnsServiceProtocol protocol = 2; - - // The service entry time to live. 
This is independent from the DNS Answer record TTL - google.protobuf.Duration ttl = 3 [(validate.rules).duration = {gte {seconds: 1}}]; - - // The list of targets hosting the service - repeated DnsServiceTarget targets = 4 [(validate.rules).repeated = {min_items: 1}]; - } - - // Define a list of service records for a given service - message DnsServiceList { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.dns.v3.DnsTable.DnsServiceList"; - - repeated DnsService services = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - message DnsEndpoint { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.dns.v3.DnsTable.DnsEndpoint"; - - oneof endpoint_config { - option (validate.required) = true; - - // Define a list of addresses to return for the specified endpoint - AddressList address_list = 1; - - // Define a cluster whose addresses are returned for the specified endpoint - string cluster_name = 2; - - // Define a DNS Service List for the specified endpoint - DnsServiceList service_list = 3; - } - } - - message DnsVirtualDomain { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.dns.v3.DnsTable.DnsVirtualDomain"; - - // A domain name for which Envoy will respond to query requests - string name = 1 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; - - // The configuration containing the method to determine the address of this endpoint - DnsEndpoint endpoint = 2; - - // Sets the TTL in DNS answers from Envoy returned to the client. The default TTL is 300s - google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gte {seconds: 30}}]; - } - - reserved 3; - - reserved "known_suffixes"; - - // Control how many times Envoy makes an attempt to forward a query to an external DNS server - uint32 external_retry_count = 1 [(validate.rules).uint32 = {lte: 3}]; - - // Fully qualified domain names for which Envoy will respond to DNS queries. 
By leaving this - // list empty, Envoy will forward all queries to external resolvers - repeated DnsVirtualDomain virtual_domains = 2; -} diff --git a/api/envoy/extensions/access_loggers/file/v4alpha/BUILD b/api/envoy/extensions/access_loggers/file/v4alpha/BUILD deleted file mode 100644 index c44559b4e763e..0000000000000 --- a/api/envoy/extensions/access_loggers/file/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/extensions/access_loggers/file/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/access_loggers/file/v4alpha/file.proto b/api/envoy/extensions/access_loggers/file/v4alpha/file.proto deleted file mode 100644 index 0597b11680598..0000000000000 --- a/api/envoy/extensions/access_loggers/file/v4alpha/file.proto +++ /dev/null @@ -1,42 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.access_loggers.file.v4alpha; - -import "envoy/config/core/v4alpha/substitution_format_string.proto"; - -import "google/protobuf/struct.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.access_loggers.file.v4alpha"; -option java_outer_classname = "FileProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: File access log] -// [#extension: envoy.access_loggers.file] - -// Custom configuration for an :ref:`AccessLog ` -// that writes log entries directly to a file. Configures the built-in *envoy.access_loggers.file* -// AccessLog. 
-// [#next-free-field: 6] -message FileAccessLog { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.access_loggers.file.v3.FileAccessLog"; - - reserved 2, 3, 4; - - reserved "format", "json_format", "typed_json_format"; - - // A path to a local file to which to write the access log entries. - string path = 1 [(validate.rules).string = {min_len: 1}]; - - oneof access_log_format { - // Configuration to form access log data and format. - // If not specified, use :ref:`default format `. - config.core.v4alpha.SubstitutionFormatString log_format = 5 - [(validate.rules).message = {required: true}]; - } -} diff --git a/api/envoy/extensions/access_loggers/grpc/v4alpha/BUILD b/api/envoy/extensions/access_loggers/grpc/v4alpha/BUILD deleted file mode 100644 index 83758c9e0b82b..0000000000000 --- a/api/envoy/extensions/access_loggers/grpc/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/extensions/access_loggers/grpc/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/access_loggers/grpc/v4alpha/als.proto b/api/envoy/extensions/access_loggers/grpc/v4alpha/als.proto deleted file mode 100644 index 9e6fb1e48386e..0000000000000 --- a/api/envoy/extensions/access_loggers/grpc/v4alpha/als.proto +++ /dev/null @@ -1,89 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.access_loggers.grpc.v4alpha; - -import "envoy/config/core/v4alpha/config_source.proto"; -import "envoy/config/core/v4alpha/grpc_service.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = 
"io.envoyproxy.envoy.extensions.access_loggers.grpc.v4alpha"; -option java_outer_classname = "AlsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: gRPC Access Log Service (ALS)] - -// Configuration for the built-in *envoy.access_loggers.http_grpc* -// :ref:`AccessLog `. This configuration will -// populate :ref:`StreamAccessLogsMessage.http_logs -// `. -// [#extension: envoy.access_loggers.http_grpc] -message HttpGrpcAccessLogConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.access_loggers.grpc.v3.HttpGrpcAccessLogConfig"; - - CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}]; - - // Additional request headers to log in :ref:`HTTPRequestProperties.request_headers - // `. - repeated string additional_request_headers_to_log = 2; - - // Additional response headers to log in :ref:`HTTPResponseProperties.response_headers - // `. - repeated string additional_response_headers_to_log = 3; - - // Additional response trailers to log in :ref:`HTTPResponseProperties.response_trailers - // `. - repeated string additional_response_trailers_to_log = 4; -} - -// Configuration for the built-in *envoy.access_loggers.tcp_grpc* type. This configuration will -// populate *StreamAccessLogsMessage.tcp_logs*. -// [#extension: envoy.access_loggers.tcp_grpc] -message TcpGrpcAccessLogConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.access_loggers.grpc.v3.TcpGrpcAccessLogConfig"; - - CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}]; -} - -// Common configuration for gRPC access logs. 
-// [#next-free-field: 7] -message CommonGrpcAccessLogConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.access_loggers.grpc.v3.CommonGrpcAccessLogConfig"; - - // The friendly name of the access log to be returned in :ref:`StreamAccessLogsMessage.Identifier - // `. This allows the - // access log server to differentiate between different access logs coming from the same Envoy. - string log_name = 1 [(validate.rules).string = {min_len: 1}]; - - // The gRPC service for the access log service. - config.core.v4alpha.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; - - // API version for access logs service transport protocol. This describes the access logs service - // gRPC endpoint and version of messages used on the wire. - config.core.v4alpha.ApiVersion transport_api_version = 6 - [(validate.rules).enum = {defined_only: true}]; - - // Interval for flushing access logs to the gRPC stream. Logger will flush requests every time - // this interval is elapsed, or when batch size limit is hit, whichever comes first. Defaults to - // 1 second. - google.protobuf.Duration buffer_flush_interval = 3 [(validate.rules).duration = {gt {}}]; - - // Soft size limit in bytes for access log entries buffer. Logger will buffer requests until - // this limit it hit, or every time flush interval is elapsed, whichever comes first. Setting it - // to zero effectively disables the batching. Defaults to 16384. - google.protobuf.UInt32Value buffer_size_bytes = 4; - - // Additional filter state objects to log in :ref:`filter_state_objects - // `. - // Logger will call `FilterState::Object::serializeAsProto` to serialize the filter state object. 
- repeated string filter_state_objects_to_log = 5; -} diff --git a/api/envoy/extensions/access_loggers/open_telemetry/v4alpha/BUILD b/api/envoy/extensions/access_loggers/open_telemetry/v4alpha/BUILD deleted file mode 100644 index 2c81e3b0b05c4..0000000000000 --- a/api/envoy/extensions/access_loggers/open_telemetry/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/access_loggers/grpc/v4alpha:pkg", - "//envoy/extensions/access_loggers/open_telemetry/v3alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@opentelemetry_proto//:common", - ], -) diff --git a/api/envoy/extensions/access_loggers/open_telemetry/v4alpha/logs_service.proto b/api/envoy/extensions/access_loggers/open_telemetry/v4alpha/logs_service.proto deleted file mode 100644 index ceecd924e19d9..0000000000000 --- a/api/envoy/extensions/access_loggers/open_telemetry/v4alpha/logs_service.proto +++ /dev/null @@ -1,47 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.access_loggers.open_telemetry.v4alpha; - -import "envoy/extensions/access_loggers/grpc/v4alpha/als.proto"; - -import "opentelemetry/proto/common/v1/common.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.access_loggers.open_telemetry.v4alpha"; -option java_outer_classname = "LogsServiceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: OpenTelemetry (gRPC) Access Log] - -// Configuration for the built-in *envoy.access_loggers.open_telemetry* -// :ref:`AccessLog `. 
This configuration will -// populate `opentelemetry.proto.collector.v1.logs.ExportLogsServiceRequest.resource_logs `_. -// OpenTelemetry `Resource `_ -// attributes are filled with Envoy node info. In addition, the request start time is set in the -// dedicated field. -// [#extension: envoy.access_loggers.open_telemetry] -// [#comment:TODO(itamarkam): allow configuration for resource attributes.] -message OpenTelemetryAccessLogConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.access_loggers.open_telemetry.v3alpha.OpenTelemetryAccessLogConfig"; - - // [#comment:TODO(itamarkam): add 'filter_state_objects_to_log' to logs.] - grpc.v4alpha.CommonGrpcAccessLogConfig common_config = 1 - [(validate.rules).message = {required: true}]; - - // OpenTelemetry `LogResource `_ - // fields, following `Envoy access logging formatting `_. - // - // See 'body' in the LogResource proto for more details. - // Example: ``body { string_value: "%PROTOCOL%" }``. - opentelemetry.proto.common.v1.AnyValue body = 2; - - // See 'attributes' in the LogResource proto for more details. - // Example: ``attributes { values { key: "user_agent" value { string_value: "%REQ(USER-AGENT)%" } } }``. - opentelemetry.proto.common.v1.KeyValueList attributes = 3; -} diff --git a/api/envoy/extensions/access_loggers/stream/v4alpha/BUILD b/api/envoy/extensions/access_loggers/stream/v4alpha/BUILD deleted file mode 100644 index 33240debccd19..0000000000000 --- a/api/envoy/extensions/access_loggers/stream/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/extensions/access_loggers/stream/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/access_loggers/stream/v4alpha/stream.proto b/api/envoy/extensions/access_loggers/stream/v4alpha/stream.proto deleted file mode 100644 index 5be54ad4721dd..0000000000000 --- a/api/envoy/extensions/access_loggers/stream/v4alpha/stream.proto +++ /dev/null @@ -1,45 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.access_loggers.stream.v4alpha; - -import "envoy/config/core/v4alpha/substitution_format_string.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.access_loggers.stream.v4alpha"; -option java_outer_classname = "StreamProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Standard Streams Access loggers] -// [#extension: envoy.access_loggers.stream] - -// Custom configuration for an :ref:`AccessLog ` -// that writes log entries directly to the operating system's standard output. -message StdoutAccessLog { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.access_loggers.stream.v3.StdoutAccessLog"; - - oneof access_log_format { - // Configuration to form access log data and format. - // If not specified, use :ref:`default format `. - config.core.v4alpha.SubstitutionFormatString log_format = 1 - [(validate.rules).message = {required: true}]; - } -} - -// Custom configuration for an :ref:`AccessLog ` -// that writes log entries directly to the operating system's standard error. 
-message StderrAccessLog { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.access_loggers.stream.v3.StderrAccessLog"; - - oneof access_log_format { - // Configuration to form access log data and format. - // If not specified, use :ref:`default format `. - config.core.v4alpha.SubstitutionFormatString log_format = 1 - [(validate.rules).message = {required: true}]; - } -} diff --git a/api/envoy/extensions/clusters/dynamic_forward_proxy/v3/BUILD b/api/envoy/extensions/clusters/dynamic_forward_proxy/v3/BUILD index d53049c388f7d..05f25a2fe5d91 100644 --- a/api/envoy/extensions/clusters/dynamic_forward_proxy/v3/BUILD +++ b/api/envoy/extensions/clusters/dynamic_forward_proxy/v3/BUILD @@ -6,7 +6,6 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ - "//envoy/config/cluster/dynamic_forward_proxy/v2alpha:pkg", "//envoy/extensions/common/dynamic_forward_proxy/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], diff --git a/api/envoy/extensions/clusters/dynamic_forward_proxy/v4alpha/BUILD b/api/envoy/extensions/clusters/dynamic_forward_proxy/v4alpha/BUILD deleted file mode 100644 index ca83092e39b11..0000000000000 --- a/api/envoy/extensions/clusters/dynamic_forward_proxy/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/clusters/dynamic_forward_proxy/v3:pkg", - "//envoy/extensions/common/dynamic_forward_proxy/v4alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/clusters/dynamic_forward_proxy/v4alpha/cluster.proto b/api/envoy/extensions/clusters/dynamic_forward_proxy/v4alpha/cluster.proto deleted file mode 100644 index 1b989e0bb725e..0000000000000 --- a/api/envoy/extensions/clusters/dynamic_forward_proxy/v4alpha/cluster.proto +++ /dev/null @@ -1,35 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.clusters.dynamic_forward_proxy.v4alpha; - -import "envoy/extensions/common/dynamic_forward_proxy/v4alpha/dns_cache.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.clusters.dynamic_forward_proxy.v4alpha"; -option java_outer_classname = "ClusterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Dynamic forward proxy cluster configuration] - -// Configuration for the dynamic forward proxy cluster. See the :ref:`architecture overview -// ` for more information. -// [#extension: envoy.clusters.dynamic_forward_proxy] -message ClusterConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.clusters.dynamic_forward_proxy.v3.ClusterConfig"; - - // The DNS cache configuration that the cluster will attach to. Note this configuration must - // match that of associated :ref:`dynamic forward proxy HTTP filter configuration - // `. 
- common.dynamic_forward_proxy.v4alpha.DnsCacheConfig dns_cache_config = 1 - [(validate.rules).message = {required: true}]; - - // If true allow the cluster configuration to disable the auto_sni and auto_san_validation options - // in the :ref:`cluster's upstream_http_protocol_options - // ` - bool allow_insecure_cluster_options = 2; -} diff --git a/api/envoy/extensions/common/dynamic_forward_proxy/v3/BUILD b/api/envoy/extensions/common/dynamic_forward_proxy/v3/BUILD index fb5436a6bf93a..6e07b4a9226bb 100644 --- a/api/envoy/extensions/common/dynamic_forward_proxy/v3/BUILD +++ b/api/envoy/extensions/common/dynamic_forward_proxy/v3/BUILD @@ -8,8 +8,8 @@ api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/config/cluster/v3:pkg", - "//envoy/config/common/dynamic_forward_proxy/v2alpha:pkg", "//envoy/config/core/v3:pkg", + "//envoy/extensions/common/key_value/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto b/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto index fa77bb8aad338..4a0d87ff6c3b8 100644 --- a/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto +++ b/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto @@ -6,6 +6,7 @@ import "envoy/config/cluster/v3/cluster.proto"; import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/extension.proto"; import "envoy/config/core/v3/resolver.proto"; +import "envoy/extensions/common/key_value/v3/config.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; @@ -31,7 +32,7 @@ message DnsCacheCircuitBreakers { // Configuration for the dynamic forward proxy DNS cache. See the :ref:`architecture overview // ` for more information. 
-// [#next-free-field: 13] +// [#next-free-field: 14] message DnsCacheConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.common.dynamic_forward_proxy.v2alpha.DnsCacheConfig"; @@ -138,4 +139,8 @@ message DnsCacheConfig { // Setting this timeout will ensure that queries succeed or fail within the specified time frame // and are then retried using the standard refresh rates. Defaults to 5s if not set. google.protobuf.Duration dns_query_timeout = 11 [(validate.rules).duration = {gt {}}]; + + // [#not-implemented-hide:] + // Configuration to flush the DNS cache to long term storage. + key_value.v3.KeyValueStoreConfig key_value_config = 13; } diff --git a/api/envoy/extensions/common/dynamic_forward_proxy/v4alpha/BUILD b/api/envoy/extensions/common/dynamic_forward_proxy/v4alpha/BUILD deleted file mode 100644 index a70cf4f2bbbd6..0000000000000 --- a/api/envoy/extensions/common/dynamic_forward_proxy/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/cluster/v4alpha:pkg", - "//envoy/config/core/v4alpha:pkg", - "//envoy/extensions/common/dynamic_forward_proxy/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/common/dynamic_forward_proxy/v4alpha/dns_cache.proto b/api/envoy/extensions/common/dynamic_forward_proxy/v4alpha/dns_cache.proto deleted file mode 100644 index 921437a1b20f6..0000000000000 --- a/api/envoy/extensions/common/dynamic_forward_proxy/v4alpha/dns_cache.proto +++ /dev/null @@ -1,138 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.common.dynamic_forward_proxy.v4alpha; - -import "envoy/config/cluster/v4alpha/cluster.proto"; -import "envoy/config/core/v4alpha/address.proto"; -import "envoy/config/core/v4alpha/extension.proto"; -import "envoy/config/core/v4alpha/resolver.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.common.dynamic_forward_proxy.v4alpha"; -option java_outer_classname = "DnsCacheProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Dynamic forward proxy common configuration] - -// Configuration of circuit breakers for resolver. -message DnsCacheCircuitBreakers { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.common.dynamic_forward_proxy.v3.DnsCacheCircuitBreakers"; - - // The maximum number of pending requests that Envoy will allow to the - // resolver. If not specified, the default is 1024. - google.protobuf.UInt32Value max_pending_requests = 1; -} - -// Configuration for the dynamic forward proxy DNS cache. 
See the :ref:`architecture overview -// ` for more information. -// [#next-free-field: 13] -message DnsCacheConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.common.dynamic_forward_proxy.v3.DnsCacheConfig"; - - reserved 8; - - reserved "use_tcp_for_dns_lookups"; - - // The name of the cache. Multiple named caches allow independent dynamic forward proxy - // configurations to operate within a single Envoy process using different configurations. All - // configurations with the same name *must* otherwise have the same settings when referenced - // from different configuration components. Configuration will fail to load if this is not - // the case. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // The DNS lookup family to use during resolution. - // - // [#comment:TODO(mattklein123): Figure out how to support IPv4/IPv6 "happy eyeballs" mode. The - // way this might work is a new lookup family which returns both IPv4 and IPv6 addresses, and - // then configures a host to have a primary and fall back address. With this, we could very - // likely build a "happy eyeballs" connection pool which would race the primary / fall back - // address and return the one that wins. This same method could potentially also be used for - // QUIC to TCP fall back.] - config.cluster.v4alpha.Cluster.DnsLookupFamily dns_lookup_family = 2 - [(validate.rules).enum = {defined_only: true}]; - - // The DNS refresh rate for currently cached DNS hosts. If not specified defaults to 60s. - // - // .. note: - // - // The returned DNS TTL is not currently used to alter the refresh rate. This feature will be - // added in a future change. - // - // .. note: - // - // The refresh rate is rounded to the closest millisecond, and must be at least 1ms. - google.protobuf.Duration dns_refresh_rate = 3 - [(validate.rules).duration = {gte {nanos: 1000000}}]; - - // The TTL for hosts that are unused. 
Hosts that have not been used in the configured time - // interval will be purged. If not specified defaults to 5m. - // - // .. note: - // - // The TTL is only checked at the time of DNS refresh, as specified by *dns_refresh_rate*. This - // means that if the configured TTL is shorter than the refresh rate the host may not be removed - // immediately. - // - // .. note: - // - // The TTL has no relation to DNS TTL and is only used to control Envoy's resource usage. - google.protobuf.Duration host_ttl = 4 [(validate.rules).duration = {gt {}}]; - - // The maximum number of hosts that the cache will hold. If not specified defaults to 1024. - // - // .. note: - // - // The implementation is approximate and enforced independently on each worker thread, thus - // it is possible for the maximum hosts in the cache to go slightly above the configured - // value depending on timing. This is similar to how other circuit breakers work. - google.protobuf.UInt32Value max_hosts = 5 [(validate.rules).uint32 = {gt: 0}]; - - // If the DNS failure refresh rate is specified, - // this is used as the cache's DNS refresh rate when DNS requests are failing. If this setting is - // not specified, the failure refresh rate defaults to the dns_refresh_rate. - config.cluster.v4alpha.Cluster.RefreshRate dns_failure_refresh_rate = 6; - - // The config of circuit breakers for resolver. It provides a configurable threshold. - // Envoy will use dns cache circuit breakers with default settings even if this value is not set. - DnsCacheCircuitBreakers dns_cache_circuit_breaker = 7; - - // DNS resolution configuration which includes the underlying dns resolver addresses and options. - // *dns_resolution_config* will be deprecated once - // :ref:'typed_dns_resolver_config ' - // is fully supported. - config.core.v4alpha.DnsResolutionConfig dns_resolution_config = 9; - - // DNS resolver type configuration extension. 
This extension can be used to configure c-ares, apple, - // or any other DNS resolver types and the related parameters. - // For example, an object of :ref:`DnsResolutionConfig ` - // can be packed into this *typed_dns_resolver_config*. This configuration will replace the - // :ref:'dns_resolution_config ' - // configuration eventually. - // TODO(yanjunxiang): Investigate the deprecation plan for *dns_resolution_config*. - // During the transition period when both *dns_resolution_config* and *typed_dns_resolver_config* exists, - // this configuration is optional. - // When *typed_dns_resolver_config* is in place, Envoy will use it and ignore *dns_resolution_config*. - // When *typed_dns_resolver_config* is missing, the default behavior is in place. - // [#not-implemented-hide:] - config.core.v4alpha.TypedExtensionConfig typed_dns_resolver_config = 12; - - // Hostnames that should be preresolved into the cache upon creation. This might provide a - // performance improvement, in the form of cache hits, for hostnames that are going to be - // resolved during steady state and are known at config load time. - repeated config.core.v4alpha.SocketAddress preresolve_hostnames = 10; - - // The timeout used for DNS queries. This timeout is independent of any timeout and retry policy - // used by the underlying DNS implementation (e.g., c-areas and Apple DNS) which are opaque. - // Setting this timeout will ensure that queries succeed or fail within the specified time frame - // and are then retried using the standard refresh rates. Defaults to 5s if not set. 
- google.protobuf.Duration dns_query_timeout = 11 [(validate.rules).duration = {gt {}}]; -} diff --git a/api/envoy/data/dns/v4alpha/BUILD b/api/envoy/extensions/common/key_value/v3/BUILD similarity index 87% rename from api/envoy/data/dns/v4alpha/BUILD rename to api/envoy/extensions/common/key_value/v3/BUILD index e32ed76cbd6f7..1c1a6f6b44235 100644 --- a/api/envoy/data/dns/v4alpha/BUILD +++ b/api/envoy/extensions/common/key_value/v3/BUILD @@ -6,7 +6,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ - "//envoy/data/dns/v3:pkg", + "//envoy/config/core/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/extensions/common/key_value/v3/config.proto b/api/envoy/extensions/common/key_value/v3/config.proto new file mode 100644 index 0000000000000..0db9c622cd16c --- /dev/null +++ b/api/envoy/extensions/common/key_value/v3/config.proto @@ -0,0 +1,25 @@ +syntax = "proto3"; + +package envoy.extensions.common.key_value.v3; + +import "envoy/config/core/v3/extension.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.common.key_value.v3"; +option java_outer_classname = "ConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Key Value Store storage plugin] + +// [#alpha:] +// This shared configuration for Envoy key value stores. 
+message KeyValueStoreConfig { + // [#extension-category: envoy.common.key_value] + config.core.v3.TypedExtensionConfig config = 1 [(validate.rules).message = {required: true}]; +} diff --git a/api/envoy/extensions/common/matching/v3/BUILD b/api/envoy/extensions/common/matching/v3/BUILD index 5fa93360e6558..1afd4545d9608 100644 --- a/api/envoy/extensions/common/matching/v3/BUILD +++ b/api/envoy/extensions/common/matching/v3/BUILD @@ -6,8 +6,10 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ + "//envoy/annotations:pkg", "//envoy/config/common/matcher/v3:pkg", "//envoy/config/core/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//xds/type/matcher/v3:pkg", ], ) diff --git a/api/envoy/extensions/common/matching/v3/extension_matcher.proto b/api/envoy/extensions/common/matching/v3/extension_matcher.proto index e317d885af393..eee82a381633b 100644 --- a/api/envoy/extensions/common/matching/v3/extension_matcher.proto +++ b/api/envoy/extensions/common/matching/v3/extension_matcher.proto @@ -5,6 +5,9 @@ package envoy.extensions.common.matching.v3; import "envoy/config/common/matcher/v3/matcher.proto"; import "envoy/config/core/v3/extension.proto"; +import "xds/type/matcher/v3/matcher.proto"; + +import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; @@ -21,8 +24,12 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // // [#alpha:] message ExtensionWithMatcher { + // The associated matcher. This is deprecated in favor of xds_matcher. + config.common.matcher.v3.Matcher matcher = 1 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + // The associated matcher. - config.common.matcher.v3.Matcher matcher = 1 [(validate.rules).message = {required: true}]; + xds.type.matcher.v3.Matcher xds_matcher = 3; // The underlying extension config. 
config.core.v3.TypedExtensionConfig extension_config = 2 diff --git a/api/envoy/extensions/common/matching/v4alpha/BUILD b/api/envoy/extensions/common/matching/v4alpha/BUILD deleted file mode 100644 index 95ccc22a554af..0000000000000 --- a/api/envoy/extensions/common/matching/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/common/matcher/v4alpha:pkg", - "//envoy/config/core/v4alpha:pkg", - "//envoy/extensions/common/matching/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/common/matching/v4alpha/extension_matcher.proto b/api/envoy/extensions/common/matching/v4alpha/extension_matcher.proto deleted file mode 100644 index 88ac7c7570f8d..0000000000000 --- a/api/envoy/extensions/common/matching/v4alpha/extension_matcher.proto +++ /dev/null @@ -1,34 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.common.matching.v4alpha; - -import "envoy/config/common/matcher/v4alpha/matcher.proto"; -import "envoy/config/core/v4alpha/extension.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.common.matching.v4alpha"; -option java_outer_classname = "ExtensionMatcherProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Extension Matcher] - -// Wrapper around an existing extension that provides an associated matcher. This allows -// decorating an existing extension with a matcher, which can be used to match against -// relevant protocol data. 
-// -// [#alpha:] -message ExtensionWithMatcher { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.common.matching.v3.ExtensionWithMatcher"; - - // The associated matcher. - config.common.matcher.v4alpha.Matcher matcher = 1 [(validate.rules).message = {required: true}]; - - // The underlying extension config. - config.core.v4alpha.TypedExtensionConfig extension_config = 2 - [(validate.rules).message = {required: true}]; -} diff --git a/api/envoy/extensions/common/tap/v4alpha/BUILD b/api/envoy/extensions/common/tap/v4alpha/BUILD deleted file mode 100644 index 4f2cbe751624c..0000000000000 --- a/api/envoy/extensions/common/tap/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/tap/v4alpha:pkg", - "//envoy/extensions/common/tap/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/common/tap/v4alpha/common.proto b/api/envoy/extensions/common/tap/v4alpha/common.proto deleted file mode 100644 index d04e033f490bc..0000000000000 --- a/api/envoy/extensions/common/tap/v4alpha/common.proto +++ /dev/null @@ -1,44 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.common.tap.v4alpha; - -import "envoy/config/tap/v4alpha/common.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.common.tap.v4alpha"; -option java_outer_classname = "CommonProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Common tap extension configuration] - -// Common configuration for all tap extensions. 
-message CommonExtensionConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.common.tap.v3.CommonExtensionConfig"; - - oneof config_type { - option (validate.required) = true; - - // If specified, the tap filter will be configured via an admin handler. - AdminConfig admin_config = 1; - - // If specified, the tap filter will be configured via a static configuration that cannot be - // changed. - config.tap.v4alpha.TapConfig static_config = 2; - } -} - -// Configuration for the admin handler. See :ref:`here ` for -// more information. -message AdminConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.common.tap.v3.AdminConfig"; - - // Opaque configuration ID. When requests are made to the admin handler, the passed opaque ID is - // matched to the configured filter opaque ID to determine which filter to configure. - string config_id = 1 [(validate.rules).string = {min_len: 1}]; -} diff --git a/api/envoy/extensions/filters/http/cache/v4alpha/BUILD b/api/envoy/extensions/filters/http/cache/v4alpha/BUILD deleted file mode 100644 index 583ecda68091a..0000000000000 --- a/api/envoy/extensions/filters/http/cache/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/route/v4alpha:pkg", - "//envoy/extensions/filters/http/cache/v3alpha:pkg", - "//envoy/type/matcher/v4alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/filters/http/cache/v4alpha/cache.proto b/api/envoy/extensions/filters/http/cache/v4alpha/cache.proto deleted file mode 100644 index 5297a3d15ef89..0000000000000 --- a/api/envoy/extensions/filters/http/cache/v4alpha/cache.proto +++ /dev/null @@ -1,82 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.cache.v4alpha; - -import "envoy/config/route/v4alpha/route_components.proto"; -import "envoy/type/matcher/v4alpha/string.proto"; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.cache.v4alpha"; -option java_outer_classname = "CacheProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: HTTP Cache Filter] - -// [#extension: envoy.filters.http.cache] -message CacheConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.cache.v3alpha.CacheConfig"; - - // [#not-implemented-hide:] - // Modifies cache key creation by restricting which parts of the URL are included. - message KeyCreatorParams { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.cache.v3alpha.CacheConfig.KeyCreatorParams"; - - // If true, exclude the URL scheme from the cache key. Set to true if your origins always - // produce the same response for http and https requests. 
- bool exclude_scheme = 1; - - // If true, exclude the host from the cache key. Set to true if your origins' responses don't - // ever depend on host. - bool exclude_host = 2; - - // If *query_parameters_included* is nonempty, only query parameters matched - // by one or more of its matchers are included in the cache key. Any other - // query params will not affect cache lookup. - repeated config.route.v4alpha.QueryParameterMatcher query_parameters_included = 3; - - // If *query_parameters_excluded* is nonempty, query parameters matched by one - // or more of its matchers are excluded from the cache key (even if also - // matched by *query_parameters_included*), and will not affect cache lookup. - repeated config.route.v4alpha.QueryParameterMatcher query_parameters_excluded = 4; - } - - // Config specific to the cache storage implementation. - // [#extension-category: envoy.filters.http.cache] - google.protobuf.Any typed_config = 1 [(validate.rules).any = {required: true}]; - - // List of matching rules that defines allowed *Vary* headers. - // - // The *vary* response header holds a list of header names that affect the - // contents of a response, as described by - // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses. - // - // During insertion, *allowed_vary_headers* acts as a allowlist: if a - // response's *vary* header mentions any header names that aren't matched by any rules in - // *allowed_vary_headers*, that response will not be cached. - // - // During lookup, *allowed_vary_headers* controls what request headers will be - // sent to the cache storage implementation. - repeated type.matcher.v4alpha.StringMatcher allowed_vary_headers = 2; - - // [#not-implemented-hide:] - // - // - // Modifies cache key creation by restricting which parts of the URL are included. - KeyCreatorParams key_creator_params = 3; - - // [#not-implemented-hide:] - // - // - // Max body size the cache filter will insert into a cache. 
0 means unlimited (though the cache - // storage implementation may have its own limit beyond which it will reject insertions). - uint32 max_body_bytes = 4; -} diff --git a/api/envoy/extensions/filters/http/compressor/v4alpha/BUILD b/api/envoy/extensions/filters/http/compressor/v4alpha/BUILD deleted file mode 100644 index 251b6da666af6..0000000000000 --- a/api/envoy/extensions/filters/http/compressor/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/extensions/filters/http/compressor/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/filters/http/compressor/v4alpha/compressor.proto b/api/envoy/extensions/filters/http/compressor/v4alpha/compressor.proto deleted file mode 100644 index 11d7757d0980c..0000000000000 --- a/api/envoy/extensions/filters/http/compressor/v4alpha/compressor.proto +++ /dev/null @@ -1,106 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.compressor.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/extension.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.compressor.v4alpha"; -option java_outer_classname = "CompressorProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Compressor] -// Compressor :ref:`configuration overview `. 
-// [#extension: envoy.filters.http.compressor] - -// [#next-free-field: 9] -message Compressor { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.compressor.v3.Compressor"; - - message CommonDirectionConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.compressor.v3.Compressor.CommonDirectionConfig"; - - // Runtime flag that controls whether compression is enabled or not for the direction this - // common config is put in. If set to false, the filter will operate as a pass-through filter - // in the chosen direction. If the field is omitted, the filter will be enabled. - config.core.v4alpha.RuntimeFeatureFlag enabled = 1; - - // Minimum value of Content-Length header of request or response messages (depending on the direction - // this common config is put in), in bytes, which will trigger compression. The default value is 30. - google.protobuf.UInt32Value min_content_length = 2; - - // Set of strings that allows specifying which mime-types yield compression; e.g., - // application/json, text/html, etc. When this field is not defined, compression will be applied - // to the following mime-types: "application/javascript", "application/json", - // "application/xhtml+xml", "image/svg+xml", "text/css", "text/html", "text/plain", "text/xml" - // and their synonyms. - repeated string content_type = 3; - } - - // Configuration for filter behavior on the request direction. - message RequestDirectionConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.compressor.v3.Compressor.RequestDirectionConfig"; - - CommonDirectionConfig common_config = 1; - } - - // Configuration for filter behavior on the response direction. 
- message ResponseDirectionConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.compressor.v3.Compressor.ResponseDirectionConfig"; - - CommonDirectionConfig common_config = 1; - - // If true, disables compression when the response contains an etag header. When it is false, the - // filter will preserve weak etags and remove the ones that require strong validation. - bool disable_on_etag_header = 2; - - // If true, removes accept-encoding from the request headers before dispatching it to the upstream - // so that responses do not get compressed before reaching the filter. - // - // .. attention:: - // - // To avoid interfering with other compression filters in the same chain use this option in - // the filter closest to the upstream. - bool remove_accept_encoding_header = 3; - } - - reserved 1, 2, 3, 4, 5; - - reserved "content_length", "content_type", "disable_on_etag_header", - "remove_accept_encoding_header", "runtime_enabled"; - - // A compressor library to use for compression. Currently only - // :ref:`envoy.compression.gzip.compressor` - // is included in Envoy. - // [#extension-category: envoy.compression.compressor] - config.core.v4alpha.TypedExtensionConfig compressor_library = 6 - [(validate.rules).message = {required: true}]; - - // Configuration for request compression. Compression is disabled by default if left empty. - RequestDirectionConfig request_direction_config = 7; - - // Configuration for response compression. Compression is enabled by default if left empty. - // - // .. attention:: - // - // If the field is not empty then the duplicate deprecated fields of the `Compressor` message, - // such as `content_length`, `content_type`, `disable_on_etag_header`, - // `remove_accept_encoding_header` and `runtime_enabled`, are ignored. - // - // Also all the statistics related to response compression will be rooted in - // `.compressor...response.*` - // instead of - // `.compressor...*`. 
- ResponseDirectionConfig response_direction_config = 8; -} diff --git a/api/envoy/extensions/filters/http/csrf/v4alpha/BUILD b/api/envoy/extensions/filters/http/csrf/v4alpha/BUILD deleted file mode 100644 index d12fc7262cac4..0000000000000 --- a/api/envoy/extensions/filters/http/csrf/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/extensions/filters/http/csrf/v3:pkg", - "//envoy/type/matcher/v4alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/filters/http/csrf/v4alpha/csrf.proto b/api/envoy/extensions/filters/http/csrf/v4alpha/csrf.proto deleted file mode 100644 index 3de55da6be8cf..0000000000000 --- a/api/envoy/extensions/filters/http/csrf/v4alpha/csrf.proto +++ /dev/null @@ -1,54 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.csrf.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/type/matcher/v4alpha/string.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.csrf.v4alpha"; -option java_outer_classname = "CsrfProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: CSRF] -// Cross-Site Request Forgery :ref:`configuration overview `. -// [#extension: envoy.filters.http.csrf] - -// CSRF filter config. -message CsrfPolicy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.csrf.v3.CsrfPolicy"; - - // Specifies the % of requests for which the CSRF filter is enabled. 
- // - // If :ref:`runtime_key ` is specified, - // Envoy will lookup the runtime key to get the percentage of requests to filter. - // - // .. note:: - // - // This field defaults to 100/:ref:`HUNDRED - // `. - config.core.v4alpha.RuntimeFractionalPercent filter_enabled = 1 - [(validate.rules).message = {required: true}]; - - // Specifies that CSRF policies will be evaluated and tracked, but not enforced. - // - // This is intended to be used when ``filter_enabled`` is off and will be ignored otherwise. - // - // If :ref:`runtime_key ` is specified, - // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate - // and track the request's *Origin* and *Destination* to determine if it's valid, but will not - // enforce any policies. - config.core.v4alpha.RuntimeFractionalPercent shadow_enabled = 2; - - // Specifies additional source origins that will be allowed in addition to - // the destination origin. - // - // More information on how this can be configured via runtime can be found - // :ref:`here `. - repeated type.matcher.v4alpha.StringMatcher additional_origins = 3; -} diff --git a/api/envoy/extensions/filters/http/dynamic_forward_proxy/v4alpha/BUILD b/api/envoy/extensions/filters/http/dynamic_forward_proxy/v4alpha/BUILD deleted file mode 100644 index 8486b45d71d91..0000000000000 --- a/api/envoy/extensions/filters/http/dynamic_forward_proxy/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/common/dynamic_forward_proxy/v4alpha:pkg", - "//envoy/extensions/filters/http/dynamic_forward_proxy/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/filters/http/dynamic_forward_proxy/v4alpha/dynamic_forward_proxy.proto b/api/envoy/extensions/filters/http/dynamic_forward_proxy/v4alpha/dynamic_forward_proxy.proto deleted file mode 100644 index 0dba06106b074..0000000000000 --- a/api/envoy/extensions/filters/http/dynamic_forward_proxy/v4alpha/dynamic_forward_proxy.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.dynamic_forward_proxy.v4alpha; - -import "envoy/extensions/common/dynamic_forward_proxy/v4alpha/dns_cache.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.dynamic_forward_proxy.v4alpha"; -option java_outer_classname = "DynamicForwardProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Dynamic forward proxy] - -// Configuration for the dynamic forward proxy HTTP filter. See the :ref:`architecture overview -// ` for more information. -// [#extension: envoy.filters.http.dynamic_forward_proxy] -message FilterConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.dynamic_forward_proxy.v3.FilterConfig"; - - // The DNS cache configuration that the filter will attach to. Note this configuration must - // match that of associated :ref:`dynamic forward proxy cluster configuration - // `. 
- common.dynamic_forward_proxy.v4alpha.DnsCacheConfig dns_cache_config = 1 - [(validate.rules).message = {required: true}]; -} - -// Per route Configuration for the dynamic forward proxy HTTP filter. -message PerRouteConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.dynamic_forward_proxy.v3.PerRouteConfig"; - - oneof host_rewrite_specifier { - // Indicates that before DNS lookup, the host header will be swapped with - // this value. If not set or empty, the original host header value - // will be used and no rewrite will happen. - // - // Note: this rewrite affects both DNS lookup and host header forwarding. However, this - // option shouldn't be used with - // :ref:`HCM host rewrite ` given that the - // value set here would be used for DNS lookups whereas the value set in the HCM would be used - // for host header forwarding which is not the desired outcome. - string host_rewrite_literal = 1; - - // Indicates that before DNS lookup, the host header will be swapped with - // the value of this header. If not set or empty, the original host header - // value will be used and no rewrite will happen. - // - // Note: this rewrite affects both DNS lookup and host header forwarding. However, this - // option shouldn't be used with - // :ref:`HCM host rewrite header ` - // given that the value set here would be used for DNS lookups whereas the value set in the HCM - // would be used for host header forwarding which is not the desired outcome. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. 
- string host_rewrite_header = 2; - } -} diff --git a/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto b/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto index c04d53f1cf8be..62feb51b191d5 100644 --- a/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto +++ b/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto @@ -10,6 +10,7 @@ import "envoy/type/matcher/v3/metadata.proto"; import "envoy/type/matcher/v3/string.proto"; import "envoy/type/v3/http_status.proto"; +import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -308,7 +309,7 @@ message CheckSettings { // // These settings are only applied to a filter configured with a // :ref:`grpc_service`. - map context_extensions = 1; + map context_extensions = 1 [(udpa.annotations.sensitive) = true]; // When set to true, disable the configured :ref:`with_request_body // ` for a route. diff --git a/api/envoy/extensions/filters/http/ext_authz/v4alpha/BUILD b/api/envoy/extensions/filters/http/ext_authz/v4alpha/BUILD deleted file mode 100644 index 16a0c5f1b64c4..0000000000000 --- a/api/envoy/extensions/filters/http/ext_authz/v4alpha/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/extensions/filters/http/ext_authz/v3:pkg", - "//envoy/type/matcher/v4alpha:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto b/api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto deleted file mode 100644 index 35b0cbd2f5475..0000000000000 --- a/api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto +++ /dev/null @@ -1,316 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.ext_authz.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/config_source.proto"; -import "envoy/config/core/v4alpha/grpc_service.proto"; -import "envoy/config/core/v4alpha/http_uri.proto"; -import "envoy/type/matcher/v4alpha/metadata.proto"; -import "envoy/type/matcher/v4alpha/string.proto"; -import "envoy/type/v3/http_status.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.ext_authz.v4alpha"; -option java_outer_classname = "ExtAuthzProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: External Authorization] -// External Authorization :ref:`configuration overview `. -// [#extension: envoy.filters.http.ext_authz] - -// [#next-free-field: 16] -message ExtAuthz { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.ext_authz.v3.ExtAuthz"; - - reserved 4; - - reserved "use_alpha"; - - // External authorization service configuration. - oneof services { - // gRPC service configuration (default timeout: 200ms). 
- config.core.v4alpha.GrpcService grpc_service = 1; - - // HTTP service configuration (default timeout: 200ms). - HttpService http_service = 3; - } - - // API version for ext_authz transport protocol. This describes the ext_authz gRPC endpoint and - // version of messages used on the wire. - config.core.v4alpha.ApiVersion transport_api_version = 12 - [(validate.rules).enum = {defined_only: true}]; - - // Changes filter's behaviour on errors: - // - // 1. When set to true, the filter will *accept* client request even if the communication with - // the authorization service has failed, or if the authorization service has returned a HTTP 5xx - // error. - // - // 2. When set to false, ext-authz will *reject* client requests and return a *Forbidden* - // response if the communication with the authorization service has failed, or if the - // authorization service has returned a HTTP 5xx error. - // - // Note that errors can be *always* tracked in the :ref:`stats - // `. - bool failure_mode_allow = 2; - - // Enables filter to buffer the client request body and send it within the authorization request. - // A ``x-envoy-auth-partial-body: false|true`` metadata header will be added to the authorization - // request message indicating if the body data is partial. - BufferSettings with_request_body = 5; - - // Clears route cache in order to allow the external authorization service to correctly affect - // routing decisions. Filter clears all cached routes when: - // - // 1. The field is set to *true*. - // - // 2. The status returned from the authorization service is a HTTP 200 or gRPC 0. - // - // 3. At least one *authorization response header* is added to the client request, or is used for - // altering another client request header. - // - bool clear_route_cache = 6; - - // Sets the HTTP status that is returned to the client when there is a network error between the - // filter and the authorization server. The default status is HTTP 403 Forbidden. 
- type.v3.HttpStatus status_on_error = 7; - - // Specifies a list of metadata namespaces whose values, if present, will be passed to the - // ext_authz service as an opaque *protobuf::Struct*. - // - // For example, if the *jwt_authn* filter is used and :ref:`payload_in_metadata - // ` is set, - // then the following will pass the jwt payload to the authorization server. - // - // .. code-block:: yaml - // - // metadata_context_namespaces: - // - envoy.filters.http.jwt_authn - // - repeated string metadata_context_namespaces = 8; - - // Specifies if the filter is enabled. - // - // If :ref:`runtime_key ` is specified, - // Envoy will lookup the runtime key to get the percentage of requests to filter. - // - // If this field is not specified, the filter will be enabled for all requests. - config.core.v4alpha.RuntimeFractionalPercent filter_enabled = 9; - - // Specifies if the filter is enabled with metadata matcher. - // If this field is not specified, the filter will be enabled for all requests. - type.matcher.v4alpha.MetadataMatcher filter_enabled_metadata = 14; - - // Specifies whether to deny the requests, when the filter is disabled. - // If :ref:`runtime_key ` is specified, - // Envoy will lookup the runtime key to determine whether to deny request for - // filter protected path at filter disabling. If filter is disabled in - // typed_per_filter_config for the path, requests will not be denied. - // - // If this field is not specified, all requests will be allowed when disabled. - config.core.v4alpha.RuntimeFeatureFlag deny_at_disable = 11; - - // Specifies if the peer certificate is sent to the external service. - // - // When this field is true, Envoy will include the peer X.509 certificate, if available, in the - // :ref:`certificate`. - bool include_peer_certificate = 10; - - // Optional additional prefix to use when emitting statistics. This allows to distinguish - // emitted statistics between configured *ext_authz* filters in an HTTP filter chain. 
For example: - // - // .. code-block:: yaml - // - // http_filters: - // - name: envoy.filters.http.ext_authz - // typed_config: - // "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz - // stat_prefix: waf # This emits ext_authz.waf.ok, ext_authz.waf.denied, etc. - // - name: envoy.filters.http.ext_authz - // typed_config: - // "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz - // stat_prefix: blocker # This emits ext_authz.blocker.ok, ext_authz.blocker.denied, etc. - // - string stat_prefix = 13; - - // Optional labels that will be passed to :ref:`labels` in - // :ref:`destination`. - // The labels will be read from :ref:`metadata` with the specified key. - string bootstrap_metadata_labels_key = 15; -} - -// Configuration for buffering the request data. -message BufferSettings { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.ext_authz.v3.BufferSettings"; - - // Sets the maximum size of a message body that the filter will hold in memory. Envoy will return - // *HTTP 413* and will *not* initiate the authorization process when buffer reaches the number - // set in this field. Note that this setting will have precedence over :ref:`failure_mode_allow - // `. - uint32 max_request_bytes = 1 [(validate.rules).uint32 = {gt: 0}]; - - // When this field is true, Envoy will buffer the message until *max_request_bytes* is reached. - // The authorization request will be dispatched and no 413 HTTP error will be returned by the - // filter. - bool allow_partial_message = 2; - - // If true, the body sent to the external authorization service is set with raw bytes, it sets - // the :ref:`raw_body` - // field of HTTP request attribute context. Otherwise, :ref:` - // body` will be filled - // with UTF-8 string request body. - bool pack_as_bytes = 3; -} - -// HttpService is used for raw HTTP communication between the filter and the authorization service. 
-// When configured, the filter will parse the client request and use these attributes to call the -// authorization server. Depending on the response, the filter may reject or accept the client -// request. Note that in any of these events, metadata can be added, removed or overridden by the -// filter: -// -// *On authorization request*, a list of allowed request headers may be supplied. See -// :ref:`allowed_headers -// ` -// for details. Additional headers metadata may be added to the authorization request. See -// :ref:`headers_to_add -// ` for -// details. -// -// On authorization response status HTTP 200 OK, the filter will allow traffic to the upstream and -// additional headers metadata may be added to the original client request. See -// :ref:`allowed_upstream_headers -// ` -// for details. Additionally, the filter may add additional headers to the client's response. See -// :ref:`allowed_client_headers_on_success -// ` -// for details. -// -// On other authorization response statuses, the filter will not allow traffic. Additional headers -// metadata as well as body may be added to the client's response. See :ref:`allowed_client_headers -// ` -// for details. -// [#next-free-field: 9] -message HttpService { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.ext_authz.v3.HttpService"; - - reserved 3, 4, 5, 6; - - // Sets the HTTP server URI which the authorization requests must be sent to. - config.core.v4alpha.HttpUri server_uri = 1; - - // Sets a prefix to the value of authorization request header *Path*. - string path_prefix = 2; - - // Settings used for controlling authorization request metadata. - AuthorizationRequest authorization_request = 7; - - // Settings used for controlling authorization response metadata. 
- AuthorizationResponse authorization_response = 8; -} - -message AuthorizationRequest { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.ext_authz.v3.AuthorizationRequest"; - - // Authorization request includes the client request headers that have a correspondent match - // in the :ref:`list `. - // - // .. note:: - // - // In addition to the the user's supplied matchers, ``Host``, ``Method``, ``Path``, - // ``Content-Length``, and ``Authorization`` are **automatically included** to the list. - // - // .. note:: - // - // By default, ``Content-Length`` header is set to ``0`` and the request to the authorization - // service has no message body. However, the authorization request *may* include the buffered - // client request body (controlled by :ref:`with_request_body - // ` - // setting) hence the value of its ``Content-Length`` reflects the size of its payload size. - // - type.matcher.v4alpha.ListStringMatcher allowed_headers = 1; - - // Sets a list of headers that will be included to the request to authorization service. Note that - // client request of the same key will be overridden. - repeated config.core.v4alpha.HeaderValue headers_to_add = 2; -} - -message AuthorizationResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.ext_authz.v3.AuthorizationResponse"; - - // When this :ref:`list ` is set, authorization - // response headers that have a correspondent match will be added to the original client request. - // Note that coexistent headers will be overridden. - type.matcher.v4alpha.ListStringMatcher allowed_upstream_headers = 1; - - // When this :ref:`list ` is set, authorization - // response headers that have a correspondent match will be added to the client's response. Note - // that coexistent headers will be appended. - type.matcher.v4alpha.ListStringMatcher allowed_upstream_headers_to_append = 3; - - // When this :ref:`list `. 
is set, authorization - // response headers that have a correspondent match will be added to the client's response. Note - // that when this list is *not* set, all the authorization response headers, except *Authority - // (Host)* will be in the response to the client. When a header is included in this list, *Path*, - // *Status*, *Content-Length*, *WWWAuthenticate* and *Location* are automatically added. - type.matcher.v4alpha.ListStringMatcher allowed_client_headers = 2; - - // When this :ref:`list `. is set, authorization - // response headers that have a correspondent match will be added to the client's response when - // the authorization response itself is successful, i.e. not failed or denied. When this list is - // *not* set, no additional headers will be added to the client's response on success. - type.matcher.v4alpha.ListStringMatcher allowed_client_headers_on_success = 4; -} - -// Extra settings on a per virtualhost/route/weighted-cluster level. -message ExtAuthzPerRoute { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.ext_authz.v3.ExtAuthzPerRoute"; - - oneof override { - option (validate.required) = true; - - // Disable the ext auth filter for this particular vhost or route. - // If disabled is specified in multiple per-filter-configs, the most specific one will be used. - bool disabled = 1 [(validate.rules).bool = {const: true}]; - - // Check request settings for this route. - CheckSettings check_settings = 2 [(validate.rules).message = {required: true}]; - } -} - -// Extra settings for the check request. -message CheckSettings { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.ext_authz.v3.CheckSettings"; - - // Context extensions to set on the CheckRequest's - // :ref:`AttributeContext.context_extensions` - // - // You can use this to provide extra context for the external authorization server on specific - // virtual hosts/routes. 
For example, adding a context extension on the virtual host level can - // give the ext-authz server information on what virtual host is used without needing to parse the - // host header. If CheckSettings is specified in multiple per-filter-configs, they will be merged - // in order, and the result will be used. - // - // Merge semantics for this field are such that keys from more specific configs override. - // - // .. note:: - // - // These settings are only applied to a filter configured with a - // :ref:`grpc_service`. - map context_extensions = 1; - - // When set to true, disable the configured :ref:`with_request_body - // ` for a route. - bool disable_request_body_buffering = 2; -} diff --git a/api/envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.proto b/api/envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.proto index 352403ad5b20d..f60865c62315e 100644 --- a/api/envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.proto +++ b/api/envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.proto @@ -23,26 +23,15 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // The External Processing filter allows an external service to act on HTTP traffic in a flexible way. // **Current Implementation Status:** -// The filter will send the "request_headers" and "response_headers" messages by default. -// In addition, if the "processing mode" is set , the "request_body" and "response_body" -// messages will be sent if the corresponding fields of the "processing_mode" are -// set to BUFFERED or STREAMED, and trailers will be sent if the corresponding fields are set -// to SEND. The BUFFERED_PARTIAL body processing mode is not -// implemented yet. The filter will also respond to "immediate_response" messages -// at any point in the stream. 
- -// As designed, the filter supports up to six different processing steps, which are in the -// process of being implemented: +// All options and processing modes are implemented except for the following: // -// * Request headers: IMPLEMENTED -// * Request body: BUFFERED_PARTIAL processing mode is not yet implemented -// * Request trailers: IMPLEMENTED -// * Response headers: IMPLEMENTED -// * Response body: BUFFERED_PARTIAL processing mode is not yet implemented -// * Response trailers: IMPLEMENTED - -// The filter communicates with an external gRPC service that can use it to do a variety of things -// with the request and response: +// * Request and response attributes are not sent and not processed. +// * Dynamic metadata in responses from the external processor is ignored. +// * "async mode" is not implemented +// * Per-route configuration is not implemented + +// The filter communicates with an external gRPC service called an "external processor" +// that can do a variety of things with the request and response: // // * Access and modify the HTTP headers on the request, response, or both // * Access and modify the HTTP request and response bodies @@ -62,6 +51,30 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // * To modify request or response trailers if they already exist // * To add request or response trailers where they are not present // +// The filter supports up to six different processing steps. Each is represented by +// a gRPC stream message that is sent to the external processor. For each message, the +// processor must send a matching response. +// +// * Request headers: Contains the headers from the original HTTP request. +// * Request body: Sent in a single message if the BUFFERED or BUFFERED_PARTIAL +// mode is chosen, in multiple messages if the STREAMED mode is chosen, and not +// at all otherwise. +// * Request trailers: Delivered if they are present and if the trailer mode is set +// to SEND. 
+// * Response headers: Contains the headers from the HTTP response. Keep in mind +// that if the upstream system sends them before processing the request body that +// this message may arrive before the complete body. +// * Response body: Sent according to the processing mode like the request body. +// * Response trailers: Delivered according to the processing mode like the +// request trailers. +// +// By default, the processor sends only the request and response headers messages. +// This may be changed to include any of the six steps by changing the processing_mode +// setting of the filter configuration, or by setting the mode_override of any response +// from the external processor. This way, a processor may, for example, use information +// in the request header to determine whether the message body must be examined, or whether +// the proxy should simply stream it straight through. +// // All of this together allows a server to process the filter traffic in fairly // sophisticated ways. For example: // diff --git a/api/envoy/extensions/filters/http/fault/v4alpha/BUILD b/api/envoy/extensions/filters/http/fault/v4alpha/BUILD deleted file mode 100644 index 6b7506bcbf76d..0000000000000 --- a/api/envoy/extensions/filters/http/fault/v4alpha/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/route/v4alpha:pkg", - "//envoy/extensions/filters/common/fault/v3:pkg", - "//envoy/extensions/filters/http/fault/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/filters/http/fault/v4alpha/fault.proto b/api/envoy/extensions/filters/http/fault/v4alpha/fault.proto deleted file mode 100644 index da8b8b48ad3f5..0000000000000 --- a/api/envoy/extensions/filters/http/fault/v4alpha/fault.proto +++ /dev/null @@ -1,150 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.fault.v4alpha; - -import "envoy/config/route/v4alpha/route_components.proto"; -import "envoy/extensions/filters/common/fault/v3/fault.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.fault.v4alpha"; -option java_outer_classname = "FaultProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Fault Injection] -// Fault Injection :ref:`configuration overview `. -// [#extension: envoy.filters.http.fault] - -// [#next-free-field: 6] -message FaultAbort { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.fault.v3.FaultAbort"; - - // Fault aborts are controlled via an HTTP header (if applicable). See the - // :ref:`HTTP fault filter ` documentation for - // more information. 
- message HeaderAbort { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.fault.v3.FaultAbort.HeaderAbort"; - } - - reserved 1; - - oneof error_type { - option (validate.required) = true; - - // HTTP status code to use to abort the HTTP request. - uint32 http_status = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; - - // gRPC status code to use to abort the gRPC request. - uint32 grpc_status = 5; - - // Fault aborts are controlled via an HTTP header (if applicable). - HeaderAbort header_abort = 4; - } - - // The percentage of requests/operations/connections that will be aborted with the error code - // provided. - type.v3.FractionalPercent percentage = 3; -} - -// [#next-free-field: 16] -message HTTPFault { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.fault.v3.HTTPFault"; - - // If specified, the filter will inject delays based on the values in the - // object. - common.fault.v3.FaultDelay delay = 1; - - // If specified, the filter will abort requests based on the values in - // the object. At least *abort* or *delay* must be specified. - FaultAbort abort = 2; - - // Specifies the name of the (destination) upstream cluster that the - // filter should match on. Fault injection will be restricted to requests - // bound to the specific upstream cluster. - string upstream_cluster = 3; - - // Specifies a set of headers that the filter should match on. The fault - // injection filter can be applied selectively to requests that match a set of - // headers specified in the fault filter config. The chances of actual fault - // injection further depend on the value of the :ref:`percentage - // ` field. - // The filter will check the request's headers against all the specified - // headers in the filter config. 
A match will happen if all the headers in the - // config are present in the request with the same values (or based on - // presence if the *value* field is not in the config). - repeated config.route.v4alpha.HeaderMatcher headers = 4; - - // Faults are injected for the specified list of downstream hosts. If this - // setting is not set, faults are injected for all downstream nodes. - // Downstream node name is taken from :ref:`the HTTP - // x-envoy-downstream-service-node - // ` header and compared - // against downstream_nodes list. - repeated string downstream_nodes = 5; - - // The maximum number of faults that can be active at a single time via the configured fault - // filter. Note that because this setting can be overridden at the route level, it's possible - // for the number of active faults to be greater than this value (if injected via a different - // route). If not specified, defaults to unlimited. This setting can be overridden via - // `runtime ` and any faults that are not injected - // due to overflow will be indicated via the `faults_overflow - // ` stat. - // - // .. attention:: - // Like other :ref:`circuit breakers ` in Envoy, this is a fuzzy - // limit. It's possible for the number of active faults to rise slightly above the configured - // amount due to the implementation details. - google.protobuf.UInt32Value max_active_faults = 6; - - // The response rate limit to be applied to the response body of the stream. When configured, - // the percentage can be overridden by the :ref:`fault.http.rate_limit.response_percent - // ` runtime key. - // - // .. attention:: - // This is a per-stream limit versus a connection level limit. This means that concurrent streams - // will each get an independent limit. - common.fault.v3.FaultRateLimit response_rate_limit = 7; - - // The runtime key to override the :ref:`default ` - // runtime. 
The default is: fault.http.delay.fixed_delay_percent - string delay_percent_runtime = 8; - - // The runtime key to override the :ref:`default ` - // runtime. The default is: fault.http.abort.abort_percent - string abort_percent_runtime = 9; - - // The runtime key to override the :ref:`default ` - // runtime. The default is: fault.http.delay.fixed_duration_ms - string delay_duration_runtime = 10; - - // The runtime key to override the :ref:`default ` - // runtime. The default is: fault.http.abort.http_status - string abort_http_status_runtime = 11; - - // The runtime key to override the :ref:`default ` - // runtime. The default is: fault.http.max_active_faults - string max_active_faults_runtime = 12; - - // The runtime key to override the :ref:`default ` - // runtime. The default is: fault.http.rate_limit.response_percent - string response_rate_limit_percent_runtime = 13; - - // The runtime key to override the :ref:`default ` - // runtime. The default is: fault.http.abort.grpc_status - string abort_grpc_status_runtime = 14; - - // To control whether stats storage is allocated dynamically for each downstream server. - // If set to true, "x-envoy-downstream-service-cluster" field of header will be ignored by this filter. - // If set to false, dynamic stats storage will be allocated for the downstream cluster name. - // Default value is false. - bool disable_downstream_cluster_stats = 15; -} diff --git a/api/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto b/api/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto index b2c4ad2ee6815..615fea923a8e1 100644 --- a/api/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto +++ b/api/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto @@ -28,8 +28,25 @@ message FilterConfig { // If true, Envoy will assume that the upstream doesn't understand gRPC frames and // strip the gRPC frame from the request, and add it back in to the response. 
This will // hide the gRPC semantics from the upstream, allowing it to receive and respond with a - // simple binary encoded protobuf. + // simple binary encoded protobuf. In order to calculate the `Content-Length` header value, Envoy + // will buffer the upstream response unless :ref:`response_size_header + // ` + // is set, in which case Envoy will use the value of an upstream header to calculate the content + // length. bool withhold_grpc_frames = 2; + + // When :ref:`withhold_grpc_frames + // ` + // is true, this option controls how Envoy calculates the `Content-Length`. When + // *response_size_header* is empty, Envoy will buffer the upstream response to calculate its + // size. When *response_size_header* is set to a non-empty string, Envoy will stream the response + // to the downstream and it will use the value of the response header with this name to set the + // `Content-Length` header and gRPC frame size. If the header with this name is repeated, only + // the first value will be used. + // + // Envoy will treat the upstream response as an error if this option is specified and the header + // is missing or if the value does not match the actual response body size. + string response_size_header = 3; } // gRPC reverse bridge filter configuration per virtualhost/route/weighted-cluster level. diff --git a/api/envoy/extensions/filters/http/gzip/v4alpha/BUILD b/api/envoy/extensions/filters/http/gzip/v4alpha/BUILD deleted file mode 100644 index 3b9648df09294..0000000000000 --- a/api/envoy/extensions/filters/http/gzip/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/filters/http/compressor/v4alpha:pkg", - "//envoy/extensions/filters/http/gzip/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/filters/http/gzip/v4alpha/gzip.proto b/api/envoy/extensions/filters/http/gzip/v4alpha/gzip.proto deleted file mode 100644 index 8689148b46253..0000000000000 --- a/api/envoy/extensions/filters/http/gzip/v4alpha/gzip.proto +++ /dev/null @@ -1,81 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.gzip.v4alpha; - -import "envoy/extensions/filters/http/compressor/v4alpha/compressor.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.gzip.v4alpha"; -option java_outer_classname = "GzipProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Gzip] - -// [#next-free-field: 12] -message Gzip { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.gzip.v3.Gzip"; - - enum CompressionStrategy { - DEFAULT = 0; - FILTERED = 1; - HUFFMAN = 2; - RLE = 3; - } - - message CompressionLevel { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.gzip.v3.Gzip.CompressionLevel"; - - enum Enum { - DEFAULT = 0; - BEST = 1; - SPEED = 2; - } - } - - reserved 2, 6, 7, 8; - - reserved "content_length", "content_type", "disable_on_etag_header", - "remove_accept_encoding_header"; - - // Value from 1 to 9 that controls the amount of internal memory used by zlib. Higher values - // use more memory, but are faster and produce better compression results. 
The default value is 5. - google.protobuf.UInt32Value memory_level = 1 [(validate.rules).uint32 = {lte: 9 gte: 1}]; - - // A value used for selecting the zlib compression level. This setting will affect speed and - // amount of compression applied to the content. "BEST" provides higher compression at the cost of - // higher latency, "SPEED" provides lower compression with minimum impact on response time. - // "DEFAULT" provides an optimal result between speed and compression. This field will be set to - // "DEFAULT" if not specified. - CompressionLevel.Enum compression_level = 3 [(validate.rules).enum = {defined_only: true}]; - - // A value used for selecting the zlib compression strategy which is directly related to the - // characteristics of the content. Most of the time "DEFAULT" will be the best choice, though - // there are situations which changing this parameter might produce better results. For example, - // run-length encoding (RLE) is typically used when the content is known for having sequences - // which same data occurs many consecutive times. For more information about each strategy, please - // refer to zlib manual. - CompressionStrategy compression_strategy = 4 [(validate.rules).enum = {defined_only: true}]; - - // Value from 9 to 15 that represents the base two logarithmic of the compressor's window size. - // Larger window results in better compression at the expense of memory usage. The default is 12 - // which will produce a 4096 bytes window. For more details about this parameter, please refer to - // zlib manual > deflateInit2. - google.protobuf.UInt32Value window_bits = 9 [(validate.rules).uint32 = {lte: 15 gte: 9}]; - - // Set of configuration parameters common for all compression filters. You can define - // `content_length`, `content_type` and other parameters in this field. - compressor.v4alpha.Compressor compressor = 10; - - // Value for Zlib's next output buffer. If not set, defaults to 4096. 
- // See https://www.zlib.net/manual.html for more details. Also see - // https://github.com/envoyproxy/envoy/issues/8448 for context on this filter's performance. - google.protobuf.UInt32Value chunk_size = 11 [(validate.rules).uint32 = {lte: 65536 gte: 4096}]; -} diff --git a/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/BUILD b/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/BUILD deleted file mode 100644 index 0a8d5eb27fb44..0000000000000 --- a/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/filters/http/header_to_metadata/v3:pkg", - "//envoy/type/matcher/v4alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto b/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto deleted file mode 100644 index 5b06f1e78556b..0000000000000 --- a/api/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto +++ /dev/null @@ -1,130 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.header_to_metadata.v4alpha; - -import "envoy/type/matcher/v4alpha/regex.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.header_to_metadata.v4alpha"; -option java_outer_classname = "HeaderToMetadataProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Header-To-Metadata Filter] -// -// The configuration for transforming headers into metadata. 
This is useful -// for matching load balancer subsets, logging, etc. -// -// Header to Metadata :ref:`configuration overview `. -// [#extension: envoy.filters.http.header_to_metadata] - -message Config { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.header_to_metadata.v3.Config"; - - enum ValueType { - STRING = 0; - - NUMBER = 1; - - // The value is a serialized `protobuf.Value - // `_. - PROTOBUF_VALUE = 2; - } - - // ValueEncode defines the encoding algorithm. - enum ValueEncode { - // The value is not encoded. - NONE = 0; - - // The value is encoded in `Base64 `_. - // Note: this is mostly used for STRING and PROTOBUF_VALUE to escape the - // non-ASCII characters in the header. - BASE64 = 1; - } - - // [#next-free-field: 7] - message KeyValuePair { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.header_to_metadata.v3.Config.KeyValuePair"; - - // The namespace — if this is empty, the filter's namespace will be used. - string metadata_namespace = 1; - - // The key to use within the namespace. - string key = 2 [(validate.rules).string = {min_len: 1}]; - - oneof value_type { - // The value to pair with the given key. - // - // When used for a - // :ref:`on_header_present ` - // case, if value is non-empty it'll be used instead of the header value. If both are empty, no metadata is added. - // - // When used for a :ref:`on_header_missing ` - // case, a non-empty value must be provided otherwise no metadata is added. - string value = 3; - - // If present, the header's value will be matched and substituted with this. If there is no match or substitution, the header value - // is used as-is. - // - // This is only used for :ref:`on_header_present `. - // - // Note: if the `value` field is non-empty this field should be empty. - type.matcher.v4alpha.RegexMatchAndSubstitute regex_value_rewrite = 6; - } - - // The value's type — defaults to string. 
- ValueType type = 4 [(validate.rules).enum = {defined_only: true}]; - - // How is the value encoded, default is NONE (not encoded). - // The value will be decoded accordingly before storing to metadata. - ValueEncode encode = 5; - } - - // A Rule defines what metadata to apply when a header is present or missing. - // [#next-free-field: 6] - message Rule { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.header_to_metadata.v3.Config.Rule"; - - oneof header_cookie_specifier { - // Specifies that a match will be performed on the value of a header or a cookie. - // - // The header to be extracted. - string header = 1 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // The cookie to be extracted. - string cookie = 5 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; - } - - // If the header or cookie is present, apply this metadata KeyValuePair. - // - // If the value in the KeyValuePair is non-empty, it'll be used instead - // of the header or cookie value. - KeyValuePair on_present = 2; - - // If the header or cookie is not present, apply this metadata KeyValuePair. - // - // The value in the KeyValuePair must be set, since it'll be used in lieu - // of the missing header or cookie value. - KeyValuePair on_missing = 3; - - // Whether or not to remove the header after a rule is applied. - // - // This prevents headers from leaking. - // This field is not supported in case of a cookie. - bool remove = 4; - } - - // The list of rules to apply to requests. - repeated Rule request_rules = 1; - - // The list of rules to apply to responses. 
- repeated Rule response_rules = 2; -} diff --git a/api/envoy/extensions/filters/http/health_check/v4alpha/BUILD b/api/envoy/extensions/filters/http/health_check/v4alpha/BUILD deleted file mode 100644 index 4c4dc0e452110..0000000000000 --- a/api/envoy/extensions/filters/http/health_check/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/route/v4alpha:pkg", - "//envoy/extensions/filters/http/health_check/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/filters/http/health_check/v4alpha/health_check.proto b/api/envoy/extensions/filters/http/health_check/v4alpha/health_check.proto deleted file mode 100644 index 3725d085dd7b0..0000000000000 --- a/api/envoy/extensions/filters/http/health_check/v4alpha/health_check.proto +++ /dev/null @@ -1,52 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.health_check.v4alpha; - -import "envoy/config/route/v4alpha/route_components.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.health_check.v4alpha"; -option java_outer_classname = "HealthCheckProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Health check] -// Health check :ref:`configuration overview `. 
-// [#extension: envoy.filters.http.health_check] - -// [#next-free-field: 6] -message HealthCheck { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.health_check.v3.HealthCheck"; - - reserved 2; - - // Specifies whether the filter operates in pass through mode or not. - google.protobuf.BoolValue pass_through_mode = 1 [(validate.rules).message = {required: true}]; - - // If operating in pass through mode, the amount of time in milliseconds - // that the filter should cache the upstream response. - google.protobuf.Duration cache_time = 3; - - // If operating in non-pass-through mode, specifies a set of upstream cluster - // names and the minimum percentage of servers in each of those clusters that - // must be healthy or degraded in order for the filter to return a 200. - // - // .. note:: - // - // This value is interpreted as an integer by truncating, so 12.50% will be calculated - // as if it were 12%. - map cluster_min_healthy_percentages = 4; - - // Specifies a set of health check request headers to match on. The health check filter will - // check a request’s headers against all the specified headers. To specify the health check - // endpoint, set the ``:path`` header to match on. 
- repeated config.route.v4alpha.HeaderMatcher headers = 5; -} diff --git a/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto b/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto index 9e658ed8627ff..9718dbe0550ab 100644 --- a/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto +++ b/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto @@ -52,7 +52,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // cache_duration: // seconds: 300 // -// [#next-free-field: 13] +// [#next-free-field: 14] message JwtProvider { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.jwt_authn.v2alpha.JwtProvider"; @@ -181,6 +181,19 @@ message JwtProvider { // repeated string from_params = 7; + // JWT is sent in a cookie. `from_cookies` represents the cookie names to extract from. + // + // For example, if config is: + // + // .. code-block:: yaml + // + // from_cookies: + // - auth-token + // + // Then JWT will be extracted from `auth-token` cookie in the request. + // + repeated string from_cookies = 13; + // This field specifies the header name to forward a successfully verified JWT payload to the // backend. The forwarded data is:: // diff --git a/api/envoy/extensions/filters/http/jwt_authn/v4alpha/BUILD b/api/envoy/extensions/filters/http/jwt_authn/v4alpha/BUILD deleted file mode 100644 index f59226044ce77..0000000000000 --- a/api/envoy/extensions/filters/http/jwt_authn/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/route/v4alpha:pkg", - "//envoy/extensions/filters/http/jwt_authn/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto b/api/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto deleted file mode 100644 index 57c6630c940e7..0000000000000 --- a/api/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto +++ /dev/null @@ -1,674 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.jwt_authn.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/http_uri.proto"; -import "envoy/config/route/v4alpha/route_components.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/empty.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.jwt_authn.v4alpha"; -option java_outer_classname = "ConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: JWT Authentication] -// JWT Authentication :ref:`configuration overview `. -// [#extension: envoy.filters.http.jwt_authn] - -// Please see following for JWT authentication flow: -// -// * `JSON Web Token (JWT) `_ -// * `The OAuth 2.0 Authorization Framework `_ -// * `OpenID Connect `_ -// -// A JwtProvider message specifies how a JSON Web Token (JWT) can be verified. It specifies: -// -// * issuer: the principal that issues the JWT. If specified, it has to match the *iss* field in JWT. -// * allowed audiences: the ones in the token have to be listed here. -// * how to fetch public key JWKS to verify the token signature. 
-// * how to extract JWT token in the request. -// * how to pass successfully verified token payload. -// -// Example: -// -// .. code-block:: yaml -// -// issuer: https://example.com -// audiences: -// - bookstore_android.apps.googleusercontent.com -// - bookstore_web.apps.googleusercontent.com -// remote_jwks: -// http_uri: -// uri: https://example.com/.well-known/jwks.json -// cluster: example_jwks_cluster -// timeout: 1s -// cache_duration: -// seconds: 300 -// -// [#next-free-field: 13] -message JwtProvider { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.jwt_authn.v3.JwtProvider"; - - // Specify the `principal `_ that issued - // the JWT, usually a URL or an email address. - // - // It is optional. If specified, it has to match the *iss* field in JWT. - // - // If a JWT has *iss* field and this field is specified, they have to match, otherwise the - // JWT *iss* field is not checked. - // - // Note: *JwtRequirement* :ref:`allow_missing ` - // and :ref:`allow_missing_or_failed ` - // are implemented differently than other *JwtRequirements*. Hence the usage of this field - // is different as follows if *allow_missing* or *allow_missing_or_failed* is used: - // - // * If a JWT has *iss* field, it needs to be specified by this field in one of *JwtProviders*. - // * If a JWT doesn't have *iss* field, one of *JwtProviders* should fill this field empty. - // * Multiple *JwtProviders* should not have same value in this field. - // - // Example: https://securetoken.google.com - // Example: 1234567-compute@developer.gserviceaccount.com - // - string issuer = 1; - - // The list of JWT `audiences `_ are - // allowed to access. A JWT containing any of these audiences will be accepted. If not specified, - // will not check audiences in the token. - // - // Example: - // - // .. 
code-block:: yaml - // - // audiences: - // - bookstore_android.apps.googleusercontent.com - // - bookstore_web.apps.googleusercontent.com - // - repeated string audiences = 2; - - // `JSON Web Key Set (JWKS) `_ is needed to - // validate signature of a JWT. This field specifies where to fetch JWKS. - oneof jwks_source_specifier { - option (validate.required) = true; - - // JWKS can be fetched from remote server via HTTP/HTTPS. This field specifies the remote HTTP - // URI and how the fetched JWKS should be cached. - // - // Example: - // - // .. code-block:: yaml - // - // remote_jwks: - // http_uri: - // uri: https://www.googleapis.com/oauth2/v1/certs - // cluster: jwt.www.googleapis.com|443 - // timeout: 1s - // cache_duration: - // seconds: 300 - // - RemoteJwks remote_jwks = 3; - - // JWKS is in local data source. It could be either in a local file or embedded in the - // inline_string. - // - // Example: local file - // - // .. code-block:: yaml - // - // local_jwks: - // filename: /etc/envoy/jwks/jwks1.txt - // - // Example: inline_string - // - // .. code-block:: yaml - // - // local_jwks: - // inline_string: ACADADADADA - // - config.core.v4alpha.DataSource local_jwks = 4; - } - - // If false, the JWT is removed in the request after a success verification. If true, the JWT is - // not removed in the request. Default value is false. - bool forward = 5; - - // Two fields below define where to extract the JWT from an HTTP request. - // - // If no explicit location is specified, the following default locations are tried in order: - // - // 1. The Authorization header using the `Bearer schema - // `_. Example:: - // - // Authorization: Bearer . - // - // 2. `access_token `_ query parameter. - // - // Multiple JWTs can be verified for a request. Each JWT has to be extracted from the locations - // its provider specified or from the default locations. - // - // Specify the HTTP headers to extract JWT token. For examples, following config: - // - // .. 
code-block:: yaml - // - // from_headers: - // - name: x-goog-iap-jwt-assertion - // - // can be used to extract token from header:: - // - // ``x-goog-iap-jwt-assertion: ``. - // - repeated JwtHeader from_headers = 6; - - // JWT is sent in a query parameter. `jwt_params` represents the query parameter names. - // - // For example, if config is: - // - // .. code-block:: yaml - // - // from_params: - // - jwt_token - // - // The JWT format in query parameter is:: - // - // /path?jwt_token= - // - repeated string from_params = 7; - - // This field specifies the header name to forward a successfully verified JWT payload to the - // backend. The forwarded data is:: - // - // base64url_encoded(jwt_payload_in_JSON) - // - // If it is not specified, the payload will not be forwarded. - string forward_payload_header = 8 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // When :ref:`forward_payload_header ` - // is specified, the base64 encoded payload will be added to the headers. - // Normally JWT based64 encode doesn't add padding. If this field is true, - // the header will be padded. - // - // This field is only relevant if :ref:`forward_payload_header ` - // is specified. - bool pad_forward_payload_header = 11; - - // If non empty, successfully verified JWT payloads will be written to StreamInfo DynamicMetadata - // in the format as: *namespace* is the jwt_authn filter name as **envoy.filters.http.jwt_authn** - // The value is the *protobuf::Struct*. The value of this field will be the key for its *fields* - // and the value is the *protobuf::Struct* converted from JWT JSON payload. - // - // For example, if payload_in_metadata is *my_payload*: - // - // .. 
code-block:: yaml - // - // envoy.filters.http.jwt_authn: - // my_payload: - // iss: https://example.com - // sub: test@example.com - // aud: https://example.com - // exp: 1501281058 - // - string payload_in_metadata = 9; - - // Specify the clock skew in seconds when verifying JWT time constraint, - // such as `exp`, and `nbf`. If not specified, default is 60 seconds. - uint32 clock_skew_seconds = 10; - - // Enables JWT cache, its size is specified by *jwt_cache_size*. - // Only valid JWT tokens are cached. - JwtCacheConfig jwt_cache_config = 12; -} - -// This message specifies JWT Cache configuration. -message JwtCacheConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.jwt_authn.v3.JwtCacheConfig"; - - // The unit is number of JWT tokens, default to 100. - uint32 jwt_cache_size = 1; -} - -// This message specifies how to fetch JWKS from remote and how to cache it. -message RemoteJwks { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.jwt_authn.v3.RemoteJwks"; - - // The HTTP URI to fetch the JWKS. For example: - // - // .. code-block:: yaml - // - // http_uri: - // uri: https://www.googleapis.com/oauth2/v1/certs - // cluster: jwt.www.googleapis.com|443 - // timeout: 1s - // - config.core.v4alpha.HttpUri http_uri = 1; - - // Duration after which the cached JWKS should be expired. If not specified, default cache - // duration is 5 minutes. - google.protobuf.Duration cache_duration = 2; - - // Fetch Jwks asynchronously in the main thread before the listener is activated. - // Fetched Jwks can be used by all worker threads. - // - // If this feature is not enabled: - // - // * The Jwks is fetched on-demand when the requests come. During the fetching, first - // few requests are paused until the Jwks is fetched. - // * Each worker thread fetches its own Jwks since Jwks cache is per worker thread. 
- // - // If this feature is enabled: - // - // * Fetched Jwks is done in the main thread before the listener is activated. Its fetched - // Jwks can be used by all worker threads. Each worker thread doesn't need to fetch its own. - // * Jwks is ready when the requests come, not need to wait for the Jwks fetching. - // - JwksAsyncFetch async_fetch = 3; - - // Retry policy for fetching Jwks. optional. turned off by default. - // - // For example: - // - // .. code-block:: yaml - // - // retry_policy: - // retry_back_off: - // base_interval: 0.01s - // max_interval: 20s - // num_retries: 10 - // - // will yield a randomized truncated exponential backoff policy with an initial delay of 10ms - // 10 maximum attempts spaced at most 20s seconds. - // - // .. code-block:: yaml - // - // retry_policy: - // num_retries:1 - // - // uses the default :ref:`retry backoff strategy `. - // with the default base interval is 1000 milliseconds. and the default maximum interval of 10 times the base interval. - // - // if num_retries is omitted, the default is to allow only one retry. - // - // - // If enabled, the retry policy will apply to all Jwks fetching approaches, e.g. on demand or asynchronously in background. - // - // - config.core.v4alpha.RetryPolicy retry_policy = 4; -} - -// Fetch Jwks asynchronously in the main thread when the filter config is parsed. -// The listener is activated only after the Jwks is fetched. -// When the Jwks is expired in the cache, it is fetched again in the main thread. -// The fetched Jwks from the main thread can be used by all worker threads. -message JwksAsyncFetch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.jwt_authn.v3.JwksAsyncFetch"; - - // If false, the listener is activated after the initial fetch is completed. - // The initial fetch result can be either successful or failed. - // If true, it is activated without waiting for the initial fetch to complete. - // Default is false. 
- bool fast_listener = 1; -} - -// This message specifies a header location to extract JWT token. -message JwtHeader { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.jwt_authn.v3.JwtHeader"; - - // The HTTP header name. - string name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // The value prefix. The value format is "value_prefix" - // For example, for "Authorization: Bearer ", value_prefix="Bearer " with a space at the - // end. - string value_prefix = 2 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; -} - -// Specify a required provider with audiences. -message ProviderWithAudiences { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.jwt_authn.v3.ProviderWithAudiences"; - - // Specify a required provider name. - string provider_name = 1; - - // This field overrides the one specified in the JwtProvider. - repeated string audiences = 2; -} - -// This message specifies a Jwt requirement. An empty message means JWT verification is not -// required. Here are some config examples: -// -// .. 
code-block:: yaml -// -// # Example 1: not required with an empty message -// -// # Example 2: require A -// provider_name: provider-A -// -// # Example 3: require A or B -// requires_any: -// requirements: -// - provider_name: provider-A -// - provider_name: provider-B -// -// # Example 4: require A and B -// requires_all: -// requirements: -// - provider_name: provider-A -// - provider_name: provider-B -// -// # Example 5: require A and (B or C) -// requires_all: -// requirements: -// - provider_name: provider-A -// - requires_any: -// requirements: -// - provider_name: provider-B -// - provider_name: provider-C -// -// # Example 6: require A or (B and C) -// requires_any: -// requirements: -// - provider_name: provider-A -// - requires_all: -// requirements: -// - provider_name: provider-B -// - provider_name: provider-C -// -// # Example 7: A is optional (if token from A is provided, it must be valid, but also allows -// missing token.) -// requires_any: -// requirements: -// - provider_name: provider-A -// - allow_missing: {} -// -// # Example 8: A is optional and B is required. -// requires_all: -// requirements: -// - requires_any: -// requirements: -// - provider_name: provider-A -// - allow_missing: {} -// - provider_name: provider-B -// -// [#next-free-field: 7] -message JwtRequirement { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.jwt_authn.v3.JwtRequirement"; - - oneof requires_type { - // Specify a required provider name. - string provider_name = 1; - - // Specify a required provider with audiences. - ProviderWithAudiences provider_and_audiences = 2; - - // Specify list of JwtRequirement. Their results are OR-ed. - // If any one of them passes, the result is passed. - JwtRequirementOrList requires_any = 3; - - // Specify list of JwtRequirement. Their results are AND-ed. - // All of them must pass, if one of them fails or missing, it fails. 
- JwtRequirementAndList requires_all = 4; - - // The requirement is always satisfied even if JWT is missing or the JWT - // verification fails. A typical usage is: this filter is used to only verify - // JWTs and pass the verified JWT payloads to another filter, the other filter - // will make decision. In this mode, all JWT tokens will be verified. - google.protobuf.Empty allow_missing_or_failed = 5; - - // The requirement is satisfied if JWT is missing, but failed if JWT is - // presented but invalid. Similar to allow_missing_or_failed, this is used - // to only verify JWTs and pass the verified payload to another filter. The - // different is this mode will reject requests with invalid tokens. - google.protobuf.Empty allow_missing = 6; - } -} - -// This message specifies a list of RequiredProvider. -// Their results are OR-ed; if any one of them passes, the result is passed -message JwtRequirementOrList { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.jwt_authn.v3.JwtRequirementOrList"; - - // Specify a list of JwtRequirement. - repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}]; -} - -// This message specifies a list of RequiredProvider. -// Their results are AND-ed; all of them must pass, if one of them fails or missing, it fails. -message JwtRequirementAndList { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.jwt_authn.v3.JwtRequirementAndList"; - - // Specify a list of JwtRequirement. - repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}]; -} - -// This message specifies a Jwt requirement for a specific Route condition. -// Example 1: -// -// .. code-block:: yaml -// -// - match: -// prefix: /healthz -// -// In above example, "requires" field is empty for /healthz prefix match, -// it means that requests matching the path prefix don't require JWT authentication. -// -// Example 2: -// -// .. 
code-block:: yaml -// -// - match: -// prefix: / -// requires: { provider_name: provider-A } -// -// In above example, all requests matched the path prefix require jwt authentication -// from "provider-A". -message RequirementRule { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.jwt_authn.v3.RequirementRule"; - - // The route matching parameter. Only when the match is satisfied, the "requires" field will - // apply. - // - // For example: following match will match all requests. - // - // .. code-block:: yaml - // - // match: - // prefix: / - // - config.route.v4alpha.RouteMatch match = 1 [(validate.rules).message = {required: true}]; - - // Specify a Jwt requirement. - // If not specified, Jwt verification is disabled. - oneof requirement_type { - // Specify a Jwt requirement. Please see detail comment in message JwtRequirement. - JwtRequirement requires = 2; - - // Use requirement_name to specify a Jwt requirement. - // This requirement_name MUST be specified at the - // :ref:`requirement_map ` - // in `JwtAuthentication`. - string requirement_name = 3 [(validate.rules).string = {min_len: 1}]; - } -} - -// This message specifies Jwt requirements based on stream_info.filterState. -// This FilterState should use `Router::StringAccessor` object to set a string value. -// Other HTTP filters can use it to specify Jwt requirements dynamically. -// -// Example: -// -// .. code-block:: yaml -// -// name: jwt_selector -// requires: -// issuer_1: -// provider_name: issuer1 -// issuer_2: -// provider_name: issuer2 -// -// If a filter set "jwt_selector" with "issuer_1" to FilterState for a request, -// jwt_authn filter will use JwtRequirement{"provider_name": "issuer1"} to verify. -message FilterStateRule { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.jwt_authn.v3.FilterStateRule"; - - // The filter state name to retrieve the `Router::StringAccessor` object. 
- string name = 1 [(validate.rules).string = {min_len: 1}]; - - // A map of string keys to requirements. The string key is the string value - // in the FilterState with the name specified in the *name* field above. - map requires = 3; -} - -// This is the Envoy HTTP filter config for JWT authentication. -// -// For example: -// -// .. code-block:: yaml -// -// providers: -// provider1: -// issuer: issuer1 -// audiences: -// - audience1 -// - audience2 -// remote_jwks: -// http_uri: -// uri: https://example.com/.well-known/jwks.json -// cluster: example_jwks_cluster -// timeout: 1s -// provider2: -// issuer: issuer2 -// local_jwks: -// inline_string: jwks_string -// -// rules: -// # Not jwt verification is required for /health path -// - match: -// prefix: /health -// -// # Jwt verification for provider1 is required for path prefixed with "prefix" -// - match: -// prefix: /prefix -// requires: -// provider_name: provider1 -// -// # Jwt verification for either provider1 or provider2 is required for all other requests. -// - match: -// prefix: / -// requires: -// requires_any: -// requirements: -// - provider_name: provider1 -// - provider_name: provider2 -// -// [#next-free-field: 6] -message JwtAuthentication { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.jwt_authn.v3.JwtAuthentication"; - - // Map of provider names to JwtProviders. - // - // .. code-block:: yaml - // - // providers: - // provider1: - // issuer: issuer1 - // audiences: - // - audience1 - // - audience2 - // remote_jwks: - // http_uri: - // uri: https://example.com/.well-known/jwks.json - // cluster: example_jwks_cluster - // timeout: 1s - // provider2: - // issuer: provider2 - // local_jwks: - // inline_string: jwks_string - // - map providers = 1; - - // Specifies requirements based on the route matches. The first matched requirement will be - // applied. If there are overlapped match conditions, please put the most specific match first. 
- // - // Examples - // - // .. code-block:: yaml - // - // rules: - // - match: - // prefix: /healthz - // - match: - // prefix: /baz - // requires: - // provider_name: provider1 - // - match: - // prefix: /foo - // requires: - // requires_any: - // requirements: - // - provider_name: provider1 - // - provider_name: provider2 - // - match: - // prefix: /bar - // requires: - // requires_all: - // requirements: - // - provider_name: provider1 - // - provider_name: provider2 - // - repeated RequirementRule rules = 2; - - // This message specifies Jwt requirements based on stream_info.filterState. - // Other HTTP filters can use it to specify Jwt requirements dynamically. - // The *rules* field above is checked first, if it could not find any matches, - // check this one. - FilterStateRule filter_state_rules = 3; - - // When set to true, bypass the `CORS preflight request - // `_ regardless of JWT - // requirements specified in the rules. - bool bypass_cors_preflight = 4; - - // A map of unique requirement_names to JwtRequirements. - // :ref:`requirement_name ` - // in `PerRouteConfig` uses this map to specify a JwtRequirement. - map requirement_map = 5; -} - -// Specify per-route config. -message PerRouteConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.jwt_authn.v3.PerRouteConfig"; - - oneof requirement_specifier { - option (validate.required) = true; - - // Disable Jwt Authentication for this route. - bool disabled = 1 [(validate.rules).bool = {const: true}]; - - // Use requirement_name to specify a JwtRequirement. - // This requirement_name MUST be specified at the - // :ref:`requirement_map ` - // in `JwtAuthentication`. If no, the requests using this route will be rejected with 403. 
- string requirement_name = 2 [(validate.rules).string = {min_len: 1}]; - } -} diff --git a/api/envoy/extensions/filters/http/oauth2/v4alpha/BUILD b/api/envoy/extensions/filters/http/oauth2/v4alpha/BUILD deleted file mode 100644 index f833eacd57722..0000000000000 --- a/api/envoy/extensions/filters/http/oauth2/v4alpha/BUILD +++ /dev/null @@ -1,16 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/route/v4alpha:pkg", - "//envoy/extensions/filters/http/oauth2/v3alpha:pkg", - "//envoy/extensions/transport_sockets/tls/v4alpha:pkg", - "//envoy/type/matcher/v4alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/filters/http/oauth2/v4alpha/oauth.proto b/api/envoy/extensions/filters/http/oauth2/v4alpha/oauth.proto deleted file mode 100644 index 75002c995ccd4..0000000000000 --- a/api/envoy/extensions/filters/http/oauth2/v4alpha/oauth.proto +++ /dev/null @@ -1,99 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.oauth2.v4alpha; - -import "envoy/config/core/v4alpha/http_uri.proto"; -import "envoy/config/route/v4alpha/route_components.proto"; -import "envoy/extensions/transport_sockets/tls/v4alpha/secret.proto"; -import "envoy/type/matcher/v4alpha/path.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.oauth2.v4alpha"; -option java_outer_classname = "OauthProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: OAuth] -// OAuth :ref:`configuration overview `. 
-// [#extension: envoy.filters.http.oauth2] -// - -message OAuth2Credentials { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.oauth2.v3alpha.OAuth2Credentials"; - - // The client_id to be used in the authorize calls. This value will be URL encoded when sent to the OAuth server. - string client_id = 1 [(validate.rules).string = {min_len: 1}]; - - // The secret used to retrieve the access token. This value will be URL encoded when sent to the OAuth server. - transport_sockets.tls.v4alpha.SdsSecretConfig token_secret = 2 - [(validate.rules).message = {required: true}]; - - // Configures how the secret token should be created. - oneof token_formation { - option (validate.required) = true; - - // If present, the secret token will be a HMAC using the provided secret. - transport_sockets.tls.v4alpha.SdsSecretConfig hmac_secret = 3 - [(validate.rules).message = {required: true}]; - } -} - -// OAuth config -// -// [#next-free-field: 11] -message OAuth2Config { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.oauth2.v3alpha.OAuth2Config"; - - // Endpoint on the authorization server to retrieve the access token from. - config.core.v4alpha.HttpUri token_endpoint = 1; - - // The endpoint redirect to for authorization in response to unauthorized requests. - string authorization_endpoint = 2 [(validate.rules).string = {min_len: 1}]; - - // Credentials used for OAuth. - OAuth2Credentials credentials = 3 [(validate.rules).message = {required: true}]; - - // The redirect URI passed to the authorization endpoint. Supports header formatting - // tokens. For more information, including details on header value syntax, see the - // documentation on :ref:`custom request headers `. - // - // This URI should not contain any query parameters. 
- string redirect_uri = 4 [(validate.rules).string = {min_len: 1}]; - - // Matching criteria used to determine whether a path appears to be the result of a redirect from the authorization server. - type.matcher.v4alpha.PathMatcher redirect_path_matcher = 5 - [(validate.rules).message = {required: true}]; - - // The path to sign a user out, clearing their credential cookies. - type.matcher.v4alpha.PathMatcher signout_path = 6 [(validate.rules).message = {required: true}]; - - // Forward the OAuth token as a Bearer to upstream web service. - bool forward_bearer_token = 7; - - // Any request that matches any of the provided matchers will be passed through without OAuth validation. - repeated config.route.v4alpha.HeaderMatcher pass_through_matcher = 8; - - // Optional list of OAuth scopes to be claimed in the authorization request. If not specified, - // defaults to "user" scope. - // OAuth RFC https://tools.ietf.org/html/rfc6749#section-3.3 - repeated string auth_scopes = 9; - - // Optional resource parameter for authorization request - // RFC: https://tools.ietf.org/html/rfc8707 - repeated string resources = 10; -} - -// Filter config. -message OAuth2 { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.oauth2.v3alpha.OAuth2"; - - // Leave this empty to disable OAuth2 for a specific route, using per filter config. - OAuth2Config config = 1; -} diff --git a/api/envoy/extensions/filters/http/ratelimit/v4alpha/BUILD b/api/envoy/extensions/filters/http/ratelimit/v4alpha/BUILD deleted file mode 100644 index 329e11fc50179..0000000000000 --- a/api/envoy/extensions/filters/http/ratelimit/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/ratelimit/v4alpha:pkg", - "//envoy/extensions/filters/http/ratelimit/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/filters/http/ratelimit/v4alpha/rate_limit.proto b/api/envoy/extensions/filters/http/ratelimit/v4alpha/rate_limit.proto deleted file mode 100644 index 688be29e6aab7..0000000000000 --- a/api/envoy/extensions/filters/http/ratelimit/v4alpha/rate_limit.proto +++ /dev/null @@ -1,125 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.ratelimit.v4alpha; - -import "envoy/config/ratelimit/v4alpha/rls.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.ratelimit.v4alpha"; -option java_outer_classname = "RateLimitProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Rate limit] -// Rate limit :ref:`configuration overview `. -// [#extension: envoy.filters.http.ratelimit] - -// [#next-free-field: 10] -message RateLimit { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.ratelimit.v3.RateLimit"; - - // Defines the version of the standard to use for X-RateLimit headers. - enum XRateLimitHeadersRFCVersion { - // X-RateLimit headers disabled. - OFF = 0; - - // Use `draft RFC Version 03 `_. - DRAFT_VERSION_03 = 1; - } - - // The rate limit domain to use when calling the rate limit service. - string domain = 1 [(validate.rules).string = {min_len: 1}]; - - // Specifies the rate limit configurations to be applied with the same - // stage number. If not set, the default stage number is 0. - // - // .. 
note:: - // - // The filter supports a range of 0 - 10 inclusively for stage numbers. - uint32 stage = 2 [(validate.rules).uint32 = {lte: 10}]; - - // The type of requests the filter should apply to. The supported - // types are *internal*, *external* or *both*. A request is considered internal if - // :ref:`x-envoy-internal` is set to true. If - // :ref:`x-envoy-internal` is not set or false, a - // request is considered external. The filter defaults to *both*, and it will apply to all request - // types. - string request_type = 3 - [(validate.rules).string = {in: "internal" in: "external" in: "both" in: ""}]; - - // The timeout in milliseconds for the rate limit service RPC. If not - // set, this defaults to 20ms. - google.protobuf.Duration timeout = 4; - - // The filter's behaviour in case the rate limiting service does - // not respond back. When it is set to true, Envoy will not allow traffic in case of - // communication failure between rate limiting service and the proxy. - bool failure_mode_deny = 5; - - // Specifies whether a `RESOURCE_EXHAUSTED` gRPC code must be returned instead - // of the default `UNAVAILABLE` gRPC code for a rate limited gRPC call. The - // HTTP code will be 200 for a gRPC response. - bool rate_limited_as_resource_exhausted = 6; - - // Configuration for an external rate limit service provider. If not - // specified, any calls to the rate limit service will immediately return - // success. - config.ratelimit.v4alpha.RateLimitServiceConfig rate_limit_service = 7 - [(validate.rules).message = {required: true}]; - - // Defines the standard version to use for X-RateLimit headers emitted by the filter: - // - // * ``X-RateLimit-Limit`` - indicates the request-quota associated to the - // client in the current time-window followed by the description of the - // quota policy. The values are returned by the rate limiting service in - // :ref:`current_limit` - // field. Example: `10, 10;w=1;name="per-ip", 1000;w=3600`. 
- // * ``X-RateLimit-Remaining`` - indicates the remaining requests in the - // current time-window. The values are returned by the rate limiting service - // in :ref:`limit_remaining` - // field. - // * ``X-RateLimit-Reset`` - indicates the number of seconds until reset of - // the current time-window. The values are returned by the rate limiting service - // in :ref:`duration_until_reset` - // field. - // - // In case rate limiting policy specifies more then one time window, the values - // above represent the window that is closest to reaching its limit. - // - // For more information about the headers specification see selected version of - // the `draft RFC `_. - // - // Disabled by default. - XRateLimitHeadersRFCVersion enable_x_ratelimit_headers = 8 - [(validate.rules).enum = {defined_only: true}]; - - // Disables emitting the :ref:`x-envoy-ratelimited` header - // in case of rate limiting (i.e. 429 responses). - // Having this header not present potentially makes the request retriable. - bool disable_x_envoy_ratelimited_header = 9; -} - -message RateLimitPerRoute { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.ratelimit.v3.RateLimitPerRoute"; - - enum VhRateLimitsOptions { - // Use the virtual host rate limits unless the route has a rate limit policy. - OVERRIDE = 0; - - // Use the virtual host rate limits even if the route has a rate limit policy. - INCLUDE = 1; - - // Ignore the virtual host rate limits even if the route does not have a rate limit policy. - IGNORE = 2; - } - - // Specifies if the rate limit filter should include the virtual host rate limits. 
- VhRateLimitsOptions vh_rate_limits = 1 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/api/envoy/extensions/filters/http/rbac/v4alpha/BUILD b/api/envoy/extensions/filters/http/rbac/v4alpha/BUILD deleted file mode 100644 index 02db15d5bde27..0000000000000 --- a/api/envoy/extensions/filters/http/rbac/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/rbac/v4alpha:pkg", - "//envoy/extensions/filters/http/rbac/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/filters/http/rbac/v4alpha/rbac.proto b/api/envoy/extensions/filters/http/rbac/v4alpha/rbac.proto deleted file mode 100644 index 41040592caceb..0000000000000 --- a/api/envoy/extensions/filters/http/rbac/v4alpha/rbac.proto +++ /dev/null @@ -1,49 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.rbac.v4alpha; - -import "envoy/config/rbac/v4alpha/rbac.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.rbac.v4alpha"; -option java_outer_classname = "RbacProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: RBAC] -// Role-Based Access Control :ref:`configuration overview `. -// [#extension: envoy.filters.http.rbac] - -// RBAC filter config. -message RBAC { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.rbac.v3.RBAC"; - - // Specify the RBAC rules to be applied globally. - // If absent, no enforcing RBAC policy will be applied. - // If present and empty, DENY. 
- config.rbac.v4alpha.RBAC rules = 1; - - // Shadow rules are not enforced by the filter (i.e., returning a 403) - // but will emit stats and logs and can be used for rule testing. - // If absent, no shadow RBAC policy will be applied. - config.rbac.v4alpha.RBAC shadow_rules = 2; - - // If specified, shadow rules will emit stats with the given prefix. - // This is useful to distinguish the stat when there are more than 1 RBAC filter configured with - // shadow rules. - string shadow_rules_stat_prefix = 3; -} - -message RBACPerRoute { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.rbac.v3.RBACPerRoute"; - - reserved 1; - - // Override the global configuration of the filter with this new config. - // If absent, the global RBAC policy will be disabled for this route. - RBAC rbac = 2; -} diff --git a/api/envoy/extensions/filters/http/router/v4alpha/BUILD b/api/envoy/extensions/filters/http/router/v4alpha/BUILD deleted file mode 100644 index b22ea48735c71..0000000000000 --- a/api/envoy/extensions/filters/http/router/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/accesslog/v4alpha:pkg", - "//envoy/extensions/filters/http/router/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/filters/http/router/v4alpha/router.proto b/api/envoy/extensions/filters/http/router/v4alpha/router.proto deleted file mode 100644 index 2d72bd1470c02..0000000000000 --- a/api/envoy/extensions/filters/http/router/v4alpha/router.proto +++ /dev/null @@ -1,91 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.router.v4alpha; - -import "envoy/config/accesslog/v4alpha/accesslog.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.router.v4alpha"; -option java_outer_classname = "RouterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Router] -// Router :ref:`configuration overview `. -// [#extension: envoy.filters.http.router] - -// [#next-free-field: 8] -message Router { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.router.v3.Router"; - - // Whether the router generates dynamic cluster statistics. Defaults to - // true. Can be disabled in high performance scenarios. - google.protobuf.BoolValue dynamic_stats = 1; - - // Whether to start a child span for egress routed calls. This can be - // useful in scenarios where other filters (auth, ratelimit, etc.) make - // outbound calls and have child spans rooted at the same ingress - // parent. Defaults to false. - bool start_child_span = 2; - - // Configuration for HTTP upstream logs emitted by the router. 
Upstream logs - // are configured in the same way as access logs, but each log entry represents - // an upstream request. Presuming retries are configured, multiple upstream - // requests may be made for each downstream (inbound) request. - repeated config.accesslog.v4alpha.AccessLog upstream_log = 3; - - // Do not add any additional *x-envoy-* headers to requests or responses. This - // only affects the :ref:`router filter generated *x-envoy-* headers - // `, other Envoy filters and the HTTP - // connection manager may continue to set *x-envoy-* headers. - bool suppress_envoy_headers = 4; - - // Specifies a list of HTTP headers to strictly validate. Envoy will reject a - // request and respond with HTTP status 400 if the request contains an invalid - // value for any of the headers listed in this field. Strict header checking - // is only supported for the following headers: - // - // Value must be a ','-delimited list (i.e. no spaces) of supported retry - // policy values: - // - // * :ref:`config_http_filters_router_x-envoy-retry-grpc-on` - // * :ref:`config_http_filters_router_x-envoy-retry-on` - // - // Value must be an integer: - // - // * :ref:`config_http_filters_router_x-envoy-max-retries` - // * :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` - // * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` - repeated string strict_check_headers = 5 [(validate.rules).repeated = { - items { - string { - in: "x-envoy-upstream-rq-timeout-ms" - in: "x-envoy-upstream-rq-per-try-timeout-ms" - in: "x-envoy-max-retries" - in: "x-envoy-retry-grpc-on" - in: "x-envoy-retry-on" - } - } - }]; - - // If not set, ingress Envoy will ignore - // :ref:`config_http_filters_router_x-envoy-expected-rq-timeout-ms` header, populated by egress - // Envoy, when deriving timeout for upstream cluster. - bool respect_expected_rq_timeout = 6; - - // If set, Envoy will avoid incrementing HTTP failure code stats - // on gRPC requests. 
This includes the individual status code value - // (e.g. upstream_rq_504) and group stats (e.g. upstream_rq_5xx). - // This field is useful if interested in relying only on the gRPC - // stats filter to define success and failure metrics for gRPC requests - // as not all failed gRPC requests charge HTTP status code metrics. See - // :ref:`gRPC stats filter` documentation - // for more details. - bool suppress_grpc_request_failure_code_stats = 7; -} diff --git a/api/envoy/extensions/filters/http/tap/v4alpha/BUILD b/api/envoy/extensions/filters/http/tap/v4alpha/BUILD deleted file mode 100644 index 7e5b65cef9b51..0000000000000 --- a/api/envoy/extensions/filters/http/tap/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/common/tap/v4alpha:pkg", - "//envoy/extensions/filters/http/tap/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/filters/http/tap/v4alpha/tap.proto b/api/envoy/extensions/filters/http/tap/v4alpha/tap.proto deleted file mode 100644 index 98798be8bfd2b..0000000000000 --- a/api/envoy/extensions/filters/http/tap/v4alpha/tap.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.tap.v4alpha; - -import "envoy/extensions/common/tap/v4alpha/common.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.tap.v4alpha"; -option java_outer_classname = "TapProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Tap] -// Tap :ref:`configuration overview `. 
-// [#extension: envoy.filters.http.tap] - -// Top level configuration for the tap filter. -message Tap { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.tap.v3.Tap"; - - // Common configuration for the HTTP tap filter. - common.tap.v4alpha.CommonExtensionConfig common_config = 1 - [(validate.rules).message = {required: true}]; -} diff --git a/api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/BUILD b/api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/BUILD deleted file mode 100644 index 752598d2f6250..0000000000000 --- a/api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/route/v4alpha:pkg", - "//envoy/extensions/filters/network/dubbo_proxy/v3:pkg", - "//envoy/type/matcher/v4alpha:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/dubbo_proxy.proto b/api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/dubbo_proxy.proto deleted file mode 100644 index 30499c27f6f0a..0000000000000 --- a/api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/dubbo_proxy.proto +++ /dev/null @@ -1,70 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.dubbo_proxy.v4alpha; - -import "envoy/extensions/filters/network/dubbo_proxy/v4alpha/route.proto"; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.v4alpha"; -option java_outer_classname = "DubboProxyProto"; -option java_multiple_files = true; -option 
(udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Dubbo Proxy] -// Dubbo Proxy :ref:`configuration overview `. -// [#extension: envoy.filters.network.dubbo_proxy] - -// Dubbo Protocol types supported by Envoy. -enum ProtocolType { - // the default protocol. - Dubbo = 0; -} - -// Dubbo Serialization types supported by Envoy. -enum SerializationType { - // the default serialization protocol. - Hessian2 = 0; -} - -// [#next-free-field: 6] -message DubboProxy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.dubbo_proxy.v3.DubboProxy"; - - // The human readable prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // Configure the protocol used. - ProtocolType protocol_type = 2 [(validate.rules).enum = {defined_only: true}]; - - // Configure the serialization protocol used. - SerializationType serialization_type = 3 [(validate.rules).enum = {defined_only: true}]; - - // The route table for the connection manager is static and is specified in this property. - repeated RouteConfiguration route_config = 4; - - // A list of individual Dubbo filters that make up the filter chain for requests made to the - // Dubbo proxy. Order matters as the filters are processed sequentially. For backwards - // compatibility, if no dubbo_filters are specified, a default Dubbo router filter - // (`envoy.filters.dubbo.router`) is used. - repeated DubboFilter dubbo_filters = 5; -} - -// DubboFilter configures a Dubbo filter. -message DubboFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.dubbo_proxy.v3.DubboFilter"; - - // The name of the filter to instantiate. The name must match a supported - // filter. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Filter specific configuration which depends on the filter being - // instantiated. 
See the supported filters for further documentation. - google.protobuf.Any config = 2; -} diff --git a/api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/route.proto b/api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/route.proto deleted file mode 100644 index d6314279ed2b6..0000000000000 --- a/api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/route.proto +++ /dev/null @@ -1,129 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.dubbo_proxy.v4alpha; - -import "envoy/config/route/v4alpha/route_components.proto"; -import "envoy/type/matcher/v4alpha/string.proto"; -import "envoy/type/v3/range.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.v4alpha"; -option java_outer_classname = "RouteProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Dubbo Proxy Route Configuration] -// Dubbo Proxy :ref:`configuration overview `. - -// [#next-free-field: 6] -message RouteConfiguration { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.dubbo_proxy.v3.RouteConfiguration"; - - // The name of the route configuration. Reserved for future use in asynchronous route discovery. - string name = 1; - - // The interface name of the service. Wildcard interface are supported in the suffix or prefix form. - // e.g. ``*.methods.add`` will match ``com.dev.methods.add``, ``com.prod.methods.add``, etc. - // ``com.dev.methods.*`` will match ``com.dev.methods.add``, ``com.dev.methods.update``, etc. - // Special wildcard ``*`` matching any interface. - // - // .. note:: - // - // The wildcard will not match the empty string. - // e.g. ``*.methods.add`` will match ``com.dev.methods.add`` but not ``.methods.add``. 
- string interface = 2; - - // Which group does the interface belong to. - string group = 3; - - // The version number of the interface. - string version = 4; - - // The list of routes that will be matched, in order, against incoming requests. The first route - // that matches will be used. - repeated Route routes = 5; -} - -message Route { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.dubbo_proxy.v3.Route"; - - // Route matching parameters. - RouteMatch match = 1 [(validate.rules).message = {required: true}]; - - // Route request to some upstream cluster. - RouteAction route = 2 [(validate.rules).message = {required: true}]; -} - -message RouteMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.dubbo_proxy.v3.RouteMatch"; - - // Method level routing matching. - MethodMatch method = 1; - - // Specifies a set of headers that the route should match on. The router will check the request’s - // headers against all the specified headers in the route config. A match will happen if all the - // headers in the route are present in the request with the same values (or based on presence if - // the value field is not in the config). - repeated config.route.v4alpha.HeaderMatcher headers = 2; -} - -message RouteAction { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.dubbo_proxy.v3.RouteAction"; - - oneof cluster_specifier { - option (validate.required) = true; - - // Indicates the upstream cluster to which the request should be routed. - string cluster = 1; - - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. - // Currently ClusterWeight only supports the name and weight fields. 
- config.route.v4alpha.WeightedCluster weighted_clusters = 2; - } -} - -message MethodMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.dubbo_proxy.v3.MethodMatch"; - - // The parameter matching type. - message ParameterMatchSpecifier { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.dubbo_proxy.v3.MethodMatch.ParameterMatchSpecifier"; - - oneof parameter_match_specifier { - // If specified, header match will be performed based on the value of the header. - string exact_match = 3; - - // If specified, header match will be performed based on range. - // The rule will match if the request header value is within this range. - // The entire request header value must represent an integer in base 10 notation: consisting - // of an optional plus or minus sign followed by a sequence of digits. The rule will not match - // if the header value does not represent an integer. Match will fail for empty values, - // floating point numbers or if only a subsequence of the header value is an integer. - // - // Examples: - // - // * For range [-10,0), route will match for header value -1, but not for 0, - // "somestring", 10.9, "-1somestring" - type.v3.Int64Range range_match = 4; - } - } - - // The name of the method. - type.matcher.v4alpha.StringMatcher name = 1; - - // Method parameter definition. - // The key is the parameter index, starting from 0. - // The value is the parameter matching type. - map params_match = 2; -} diff --git a/api/envoy/extensions/filters/network/ext_authz/v4alpha/BUILD b/api/envoy/extensions/filters/network/ext_authz/v4alpha/BUILD deleted file mode 100644 index 6d146b1c64d18..0000000000000 --- a/api/envoy/extensions/filters/network/ext_authz/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/extensions/filters/network/ext_authz/v3:pkg", - "//envoy/type/matcher/v4alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/filters/network/ext_authz/v4alpha/ext_authz.proto b/api/envoy/extensions/filters/network/ext_authz/v4alpha/ext_authz.proto deleted file mode 100644 index 21f30481292fa..0000000000000 --- a/api/envoy/extensions/filters/network/ext_authz/v4alpha/ext_authz.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.ext_authz.v4alpha; - -import "envoy/config/core/v4alpha/config_source.proto"; -import "envoy/config/core/v4alpha/grpc_service.proto"; -import "envoy/type/matcher/v4alpha/metadata.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.ext_authz.v4alpha"; -option java_outer_classname = "ExtAuthzProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Network External Authorization ] -// The network layer external authorization service configuration -// :ref:`configuration overview `. -// [#extension: envoy.filters.network.ext_authz] - -// External Authorization filter calls out to an external service over the -// gRPC Authorization API defined by -// :ref:`CheckRequest `. -// A failed check will cause this filter to close the TCP connection. -// [#next-free-field: 8] -message ExtAuthz { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.ext_authz.v3.ExtAuthz"; - - // The prefix to use when emitting statistics. 
- string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // The external authorization gRPC service configuration. - // The default timeout is set to 200ms by this filter. - config.core.v4alpha.GrpcService grpc_service = 2; - - // The filter's behaviour in case the external authorization service does - // not respond back. When it is set to true, Envoy will also allow traffic in case of - // communication failure between authorization service and the proxy. - // Defaults to false. - bool failure_mode_allow = 3; - - // Specifies if the peer certificate is sent to the external service. - // - // When this field is true, Envoy will include the peer X.509 certificate, if available, in the - // :ref:`certificate`. - bool include_peer_certificate = 4; - - // API version for ext_authz transport protocol. This describes the ext_authz gRPC endpoint and - // version of Check{Request,Response} used on the wire. - config.core.v4alpha.ApiVersion transport_api_version = 5 - [(validate.rules).enum = {defined_only: true}]; - - // Specifies if the filter is enabled with metadata matcher. - // If this field is not specified, the filter will be enabled for all requests. - type.matcher.v4alpha.MetadataMatcher filter_enabled_metadata = 6; - - // Optional labels that will be passed to :ref:`labels` in - // :ref:`destination`. - // The labels will be read from :ref:`metadata` with the specified key. 
- string bootstrap_metadata_labels_key = 7; -} diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v3/BUILD b/api/envoy/extensions/filters/network/http_connection_manager/v3/BUILD index 456f4e9e61702..55b63248136ce 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v3/BUILD +++ b/api/envoy/extensions/filters/network/http_connection_manager/v3/BUILD @@ -6,7 +6,6 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ - "//envoy/annotations:pkg", "//envoy/config/accesslog/v3:pkg", "//envoy/config/core/v3:pkg", "//envoy/config/filter/network/http_connection_manager/v2:pkg", diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index fa65ae4bcf757..3fb4bfa09e206 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -19,7 +19,6 @@ import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; -import "envoy/annotations/deprecation.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; @@ -501,24 +500,7 @@ message HttpConnectionManager { // determining the origin client's IP address. The default is zero if this option // is not specified. See the documentation for // :ref:`config_http_conn_man_headers_x-forwarded-for` for more information. - // - // .. note:: - // This field is deprecated and instead :ref:`original_ip_detection_extensions - // ` - // should be used to configure the :ref:`xff extension ` - // to configure IP detection using the :ref:`config_http_conn_man_headers_x-forwarded-for` header. To replace - // this field use a config like the following: - // - // .. 
code-block:: yaml - // - // original_ip_detection_extensions: - // - name: envoy.http.original_ip_detection.xff - // typed_config: - // "@type": type.googleapis.com/envoy.extensions.http.original_ip_detection.xff.v3.XffConfig - // xff_num_trusted_hops: 1 - // - uint32 xff_num_trusted_hops = 19 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + uint32 xff_num_trusted_hops = 19; // The configuration for the original IP detection extensions. // @@ -530,6 +512,12 @@ message HttpConnectionManager { // the request. If the request isn't rejected nor any extension succeeds, the HCM will // fallback to using the remote address. // + // .. WARNING:: + // Extensions cannot be used in conjunction with :ref:`use_remote_address + // ` + // nor :ref:`xff_num_trusted_hops + // `. + // // [#extension-category: envoy.http.original_ip_detection] repeated config.core.v3.TypedExtensionConfig original_ip_detection_extensions = 46; diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD deleted file mode 100644 index 64536cdef30b9..0000000000000 --- a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD +++ /dev/null @@ -1,19 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/accesslog/v4alpha:pkg", - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/route/v4alpha:pkg", - "//envoy/config/trace/v4alpha:pkg", - "//envoy/extensions/filters/network/http_connection_manager/v3:pkg", - "//envoy/type/http/v3:pkg", - "//envoy/type/tracing/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto deleted file mode 100644 index bf3cc9ef34a49..0000000000000 --- a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ /dev/null @@ -1,1005 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.http_connection_manager.v4alpha; - -import "envoy/config/accesslog/v4alpha/accesslog.proto"; -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/config_source.proto"; -import "envoy/config/core/v4alpha/extension.proto"; -import "envoy/config/core/v4alpha/protocol.proto"; -import "envoy/config/core/v4alpha/substitution_format_string.proto"; -import "envoy/config/route/v4alpha/route.proto"; -import "envoy/config/route/v4alpha/scoped_route.proto"; -import "envoy/config/trace/v4alpha/http_tracer.proto"; -import "envoy/type/http/v3/path_transformation.proto"; -import "envoy/type/tracing/v3/custom_tag.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/security.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = 
"io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v4alpha"; -option java_outer_classname = "HttpConnectionManagerProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: HTTP connection manager] -// HTTP connection manager :ref:`configuration overview `. -// [#extension: envoy.filters.network.http_connection_manager] - -// [#next-free-field: 49] -message HttpConnectionManager { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager"; - - enum CodecType { - // For every new connection, the connection manager will determine which - // codec to use. This mode supports both ALPN for TLS listeners as well as - // protocol inference for plaintext listeners. If ALPN data is available, it - // is preferred, otherwise protocol inference is used. In almost all cases, - // this is the right option to choose for this setting. - AUTO = 0; - - // The connection manager will assume that the client is speaking HTTP/1.1. - HTTP1 = 1; - - // The connection manager will assume that the client is speaking HTTP/2 - // (Envoy does not require HTTP/2 to take place over TLS or to use ALPN. - // Prior knowledge is allowed). - HTTP2 = 2; - - // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with - // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient - // to distinguish HTTP1 and HTTP2 traffic. - HTTP3 = 3; - } - - enum ServerHeaderTransformation { - // Overwrite any Server header with the contents of server_name. - OVERWRITE = 0; - - // If no Server header is present, append Server server_name - // If a Server header is present, pass it through. - APPEND_IF_ABSENT = 1; - - // Pass through the value of the server header, and do not append a header - // if none is present. 
- PASS_THROUGH = 2; - } - - // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP - // header. - enum ForwardClientCertDetails { - // Do not send the XFCC header to the next hop. This is the default value. - SANITIZE = 0; - - // When the client connection is mTLS (Mutual TLS), forward the XFCC header - // in the request. - FORWARD_ONLY = 1; - - // When the client connection is mTLS, append the client certificate - // information to the request’s XFCC header and forward it. - APPEND_FORWARD = 2; - - // When the client connection is mTLS, reset the XFCC header with the client - // certificate information and send it to the next hop. - SANITIZE_SET = 3; - - // Always forward the XFCC header in the request, regardless of whether the - // client connection is mTLS. - ALWAYS_FORWARD_ONLY = 4; - } - - // Determines the action for request that contain %2F, %2f, %5C or %5c sequences in the URI path. - // This operation occurs before URL normalization and the merge slashes transformations if they were enabled. - enum PathWithEscapedSlashesAction { - // Default behavior specific to implementation (i.e. Envoy) of this configuration option. - // Envoy, by default, takes the KEEP_UNCHANGED action. - // NOTE: the implementation may change the default behavior at-will. - IMPLEMENTATION_SPECIFIC_DEFAULT = 0; - - // Keep escaped slashes. - KEEP_UNCHANGED = 1; - - // Reject client request with the 400 status. gRPC requests will be rejected with the INTERNAL (13) error code. - // The "httpN.downstream_rq_failed_path_normalization" counter is incremented for each rejected request. - REJECT_REQUEST = 2; - - // Unescape %2F and %5C sequences and redirect request to the new path if these sequences were present. - // Redirect occurs after path normalization and merge slashes transformations if they were configured. - // NOTE: gRPC requests will be rejected with the INTERNAL (13) error code. 
- // This option minimizes possibility of path confusion exploits by forcing request with unescaped slashes to - // traverse all parties: downstream client, intermediate proxies, Envoy and upstream server. - // The "httpN.downstream_rq_redirected_with_normalized_path" counter is incremented for each - // redirected request. - UNESCAPE_AND_REDIRECT = 3; - - // Unescape %2F and %5C sequences. - // Note: this option should not be enabled if intermediaries perform path based access control as - // it may lead to path confusion vulnerabilities. - UNESCAPE_AND_FORWARD = 4; - } - - // [#next-free-field: 10] - message Tracing { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing"; - - enum OperationName { - // The HTTP listener is used for ingress/incoming requests. - INGRESS = 0; - - // The HTTP listener is used for egress/outgoing requests. - EGRESS = 1; - } - - reserved 1, 2; - - reserved "operation_name", "request_headers_for_tags"; - - // Target percentage of requests managed by this HTTP connection manager that will be force - // traced if the :ref:`x-client-trace-id ` - // header is set. This field is a direct analog for the runtime variable - // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager - // `. - // Default: 100% - type.v3.Percent client_sampling = 3; - - // Target percentage of requests managed by this HTTP connection manager that will be randomly - // selected for trace generation, if not requested by the client or not forced. This field is - // a direct analog for the runtime variable 'tracing.random_sampling' in the - // :ref:`HTTP Connection Manager `. - // Default: 100% - type.v3.Percent random_sampling = 4; - - // Target percentage of requests managed by this HTTP connection manager that will be traced - // after all other sampling checks have been applied (client-directed, force tracing, random - // sampling). 
This field functions as an upper limit on the total configured sampling rate. For - // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1% - // of client requests with the appropriate headers to be force traced. This field is a direct - // analog for the runtime variable 'tracing.global_enabled' in the - // :ref:`HTTP Connection Manager `. - // Default: 100% - type.v3.Percent overall_sampling = 5; - - // Whether to annotate spans with additional data. If true, spans will include logs for stream - // events. - bool verbose = 6; - - // Maximum length of the request path to extract and include in the HttpUrl tag. Used to - // truncate lengthy request paths to meet the needs of a tracing backend. - // Default: 256 - google.protobuf.UInt32Value max_path_tag_length = 7; - - // A list of custom tags with unique tag name to create tags for the active span. - repeated type.tracing.v3.CustomTag custom_tags = 8; - - // Configuration for an external tracing provider. - // If not specified, no tracing will be performed. - // - // .. attention:: - // Please be aware that *envoy.tracers.opencensus* provider can only be configured once - // in Envoy lifetime. - // Any attempts to reconfigure it or to use different configurations for different HCM filters - // will be rejected. - // Such a constraint is inherent to OpenCensus itself. It cannot be overcome without changes - // on OpenCensus side. - config.trace.v4alpha.Tracing.Http provider = 9; - } - - message InternalAddressConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager." - "InternalAddressConfig"; - - // Whether unix socket addresses should be considered internal. 
- bool unix_sockets = 1; - } - - // [#next-free-field: 7] - message SetCurrentClientCertDetails { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager." - "SetCurrentClientCertDetails"; - - reserved 2; - - // Whether to forward the subject of the client cert. Defaults to false. - google.protobuf.BoolValue subject = 1; - - // Whether to forward the entire client cert in URL encoded PEM format. This will appear in the - // XFCC header comma separated from other values with the value Cert="PEM". - // Defaults to false. - bool cert = 3; - - // Whether to forward the entire client cert chain (including the leaf cert) in URL encoded PEM - // format. This will appear in the XFCC header comma separated from other values with the value - // Chain="PEM". - // Defaults to false. - bool chain = 6; - - // Whether to forward the DNS type Subject Alternative Names of the client cert. - // Defaults to false. - bool dns = 4; - - // Whether to forward the URI type Subject Alternative Name of the client cert. Defaults to - // false. - bool uri = 5; - } - - // The configuration for HTTP upgrades. - // For each upgrade type desired, an UpgradeConfig must be added. - // - // .. warning:: - // - // The current implementation of upgrade headers does not handle - // multi-valued upgrade headers. Support for multi-valued headers may be - // added in the future if needed. - // - // .. warning:: - // The current implementation of upgrade headers does not work with HTTP/2 - // upstreams. - message UpgradeConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager." - "UpgradeConfig"; - - // The case-insensitive name of this upgrade, e.g. "websocket". - // For each upgrade type present in upgrade_configs, requests with - // Upgrade: [upgrade_type] - // will be proxied upstream. 
- string upgrade_type = 1; - - // If present, this represents the filter chain which will be created for - // this type of upgrade. If no filters are present, the filter chain for - // HTTP connections will be used for this upgrade type. - repeated HttpFilter filters = 2; - - // Determines if upgrades are enabled or disabled by default. Defaults to true. - // This can be overridden on a per-route basis with :ref:`cluster - // ` as documented in the - // :ref:`upgrade documentation `. - google.protobuf.BoolValue enabled = 3; - } - - // [#not-implemented-hide:] Transformations that apply to path headers. Transformations are applied - // before any processing of requests by HTTP filters, routing, and matching. Only the normalized - // path will be visible internally if a transformation is enabled. Any path rewrites that the - // router performs (e.g. :ref:`regex_rewrite - // ` or :ref:`prefix_rewrite - // `) will apply to the *:path* header - // destined for the upstream. - // - // Note: access logging and tracing will show the original *:path* header. - message PathNormalizationOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager." - "PathNormalizationOptions"; - - // [#not-implemented-hide:] Normalization applies internally before any processing of requests by - // HTTP filters, routing, and matching *and* will affect the forwarded *:path* header. Defaults - // to :ref:`NormalizePathRFC3986 - // `. When not - // specified, this value may be overridden by the runtime variable - // :ref:`http_connection_manager.normalize_path`. - // Envoy will respond with 400 to paths that are malformed (e.g. for paths that fail RFC 3986 - // normalization due to disallowed characters.) 
- type.http.v3.PathTransformation forwarding_transformation = 1; - - // [#not-implemented-hide:] Normalization only applies internally before any processing of - // requests by HTTP filters, routing, and matching. These will be applied after full - // transformation is applied. The *:path* header before this transformation will be restored in - // the router filter and sent upstream unless it was mutated by a filter. Defaults to no - // transformations. - // Multiple actions can be applied in the same Transformation, forming a sequential - // pipeline. The transformations will be performed in the order that they appear. Envoy will - // respond with 400 to paths that are malformed (e.g. for paths that fail RFC 3986 - // normalization due to disallowed characters.) - type.http.v3.PathTransformation http_filter_transformation = 2; - } - - reserved 27, 11, 19; - - reserved "idle_timeout", "xff_num_trusted_hops"; - - // Supplies the type of codec that the connection manager should use. - CodecType codec_type = 1 [(validate.rules).enum = {defined_only: true}]; - - // The human readable prefix to use when emitting statistics for the - // connection manager. See the :ref:`statistics documentation ` for - // more information. - string stat_prefix = 2 [(validate.rules).string = {min_len: 1}]; - - oneof route_specifier { - option (validate.required) = true; - - // The connection manager’s route table will be dynamically loaded via the RDS API. - Rds rds = 3; - - // The route table for the connection manager is static and is specified in this property. - config.route.v4alpha.RouteConfiguration route_config = 4; - - // A route table will be dynamically assigned to each request based on request attributes - // (e.g., the value of a header). The "routing scopes" (i.e., route tables) and "scope keys" are - // specified in this message. 
- ScopedRoutes scoped_routes = 31; - } - - // A list of individual HTTP filters that make up the filter chain for - // requests made to the connection manager. :ref:`Order matters ` - // as the filters are processed sequentially as request events happen. - repeated HttpFilter http_filters = 5; - - // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` - // and :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. See the linked - // documentation for more information. Defaults to false. - google.protobuf.BoolValue add_user_agent = 6; - - // Presence of the object defines whether the connection manager - // emits :ref:`tracing ` data to the :ref:`configured tracing provider - // `. - Tracing tracing = 7; - - // Additional settings for HTTP requests handled by the connection manager. These will be - // applicable to both HTTP1 and HTTP2 requests. - config.core.v4alpha.HttpProtocolOptions common_http_protocol_options = 35 - [(udpa.annotations.security).configure_for_untrusted_downstream = true]; - - // Additional HTTP/1 settings that are passed to the HTTP/1 codec. - config.core.v4alpha.Http1ProtocolOptions http_protocol_options = 8; - - // Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. - config.core.v4alpha.Http2ProtocolOptions http2_protocol_options = 9 - [(udpa.annotations.security).configure_for_untrusted_downstream = true]; - - // Additional HTTP/3 settings that are passed directly to the HTTP/3 codec. - // [#not-implemented-hide:] - config.core.v4alpha.Http3ProtocolOptions http3_protocol_options = 44; - - // An optional override that the connection manager will write to the server - // header in responses. If not set, the default is *envoy*. - string server_name = 10 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Defines the action to be applied to the Server header on the response path. 
- // By default, Envoy will overwrite the header with the value specified in - // server_name. - ServerHeaderTransformation server_header_transformation = 34 - [(validate.rules).enum = {defined_only: true}]; - - // Allows for explicit transformation of the :scheme header on the request path. - // If not set, Envoy's default :ref:`scheme ` - // handling applies. - config.core.v4alpha.SchemeHeaderTransformation scheme_header_transformation = 48; - - // The maximum request headers size for incoming connections. - // If unconfigured, the default max request headers allowed is 60 KiB. - // Requests that exceed this limit will receive a 431 response. - google.protobuf.UInt32Value max_request_headers_kb = 29 - [(validate.rules).uint32 = {lte: 8192 gt: 0}]; - - // The stream idle timeout for connections managed by the connection manager. - // If not specified, this defaults to 5 minutes. The default value was selected - // so as not to interfere with any smaller configured timeouts that may have - // existed in configurations prior to the introduction of this feature, while - // introducing robustness to TCP connections that terminate without a FIN. - // - // This idle timeout applies to new streams and is overridable by the - // :ref:`route-level idle_timeout - // `. Even on a stream in - // which the override applies, prior to receipt of the initial request - // headers, the :ref:`stream_idle_timeout - // ` - // applies. Each time an encode/decode event for headers or data is processed - // for the stream, the timer will be reset. If the timeout fires, the stream - // is terminated with a 408 Request Timeout error code if no upstream response - // header has been received, otherwise a stream reset occurs. - // - // This timeout also specifies the amount of time that Envoy will wait for the peer to open enough - // window to write any remaining stream data once the entirety of stream data (local end stream is - // true) has been buffered pending available window. 
In other words, this timeout defends against - // a peer that does not release enough window to completely write the stream, even though all - // data has been proxied within available flow control windows. If the timeout is hit in this - // case, the :ref:`tx_flush_timeout ` counter will be - // incremented. Note that :ref:`max_stream_duration - // ` does not apply to - // this corner case. - // - // If the :ref:`overload action ` "envoy.overload_actions.reduce_timeouts" - // is configured, this timeout is scaled according to the value for - // :ref:`HTTP_DOWNSTREAM_STREAM_IDLE `. - // - // Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due - // to the granularity of events presented to the connection manager. For example, while receiving - // very large request headers, it may be the case that there is traffic regularly arriving on the - // wire while the connection manage is only able to observe the end-of-headers event, hence the - // stream may still idle timeout. - // - // A value of 0 will completely disable the connection manager stream idle - // timeout, although per-route idle timeout overrides will continue to apply. - google.protobuf.Duration stream_idle_timeout = 24 - [(udpa.annotations.security).configure_for_untrusted_downstream = true]; - - // The amount of time that Envoy will wait for the entire request to be received. - // The timer is activated when the request is initiated, and is disarmed when the last byte of the - // request is sent upstream (i.e. all decoding filters have processed the request), OR when the - // response is initiated. If not specified or set to 0, this timeout is disabled. - google.protobuf.Duration request_timeout = 28 - [(udpa.annotations.security).configure_for_untrusted_downstream = true]; - - // The amount of time that Envoy will wait for the request headers to be received. 
The timer is - // activated when the first byte of the headers is received, and is disarmed when the last byte of - // the headers has been received. If not specified or set to 0, this timeout is disabled. - google.protobuf.Duration request_headers_timeout = 41 [ - (validate.rules).duration = {gte {}}, - (udpa.annotations.security).configure_for_untrusted_downstream = true - ]; - - // The time that Envoy will wait between sending an HTTP/2 “shutdown - // notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame. - // This is used so that Envoy provides a grace period for new streams that - // race with the final GOAWAY frame. During this grace period, Envoy will - // continue to accept new streams. After the grace period, a final GOAWAY - // frame is sent and Envoy will start refusing new streams. Draining occurs - // both when a connection hits the idle timeout or during general server - // draining. The default grace period is 5000 milliseconds (5 seconds) if this - // option is not specified. - google.protobuf.Duration drain_timeout = 12; - - // The delayed close timeout is for downstream connections managed by the HTTP connection manager. - // It is defined as a grace period after connection close processing has been locally initiated - // during which Envoy will wait for the peer to close (i.e., a TCP FIN/RST is received by Envoy - // from the downstream connection) prior to Envoy closing the socket associated with that - // connection. - // NOTE: This timeout is enforced even when the socket associated with the downstream connection - // is pending a flush of the write buffer. However, any progress made writing data to the socket - // will restart the timer associated with this timeout. This means that the total grace period for - // a socket in this state will be - // +. 
- // - // Delaying Envoy's connection close and giving the peer the opportunity to initiate the close - // sequence mitigates a race condition that exists when downstream clients do not drain/process - // data in a connection's receive buffer after a remote close has been detected via a socket - // write(). This race leads to such clients failing to process the response code sent by Envoy, - // which could result in erroneous downstream processing. - // - // If the timeout triggers, Envoy will close the connection's socket. - // - // The default timeout is 1000 ms if this option is not specified. - // - // .. NOTE:: - // To be useful in avoiding the race condition described above, this timeout must be set - // to *at least* +<100ms to account for - // a reasonable "worst" case processing time for a full iteration of Envoy's event loop>. - // - // .. WARNING:: - // A value of 0 will completely disable delayed close processing. When disabled, the downstream - // connection's socket will be closed immediately after the write flush is completed or will - // never close if the write flush does not complete. - google.protobuf.Duration delayed_close_timeout = 26; - - // Configuration for :ref:`HTTP access logs ` - // emitted by the connection manager. - repeated config.accesslog.v4alpha.AccessLog access_log = 13; - - // If set to true, the connection manager will use the real remote address - // of the client connection when determining internal versus external origin and manipulating - // various headers. If set to false or absent, the connection manager will use the - // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. See the documentation for - // :ref:`config_http_conn_man_headers_x-forwarded-for`, - // :ref:`config_http_conn_man_headers_x-envoy-internal`, and - // :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information. 
- google.protobuf.BoolValue use_remote_address = 14 - [(udpa.annotations.security).configure_for_untrusted_downstream = true]; - - // The configuration for the original IP detection extensions. - // - // When configured the extensions will be called along with the request headers - // and information about the downstream connection, such as the directly connected address. - // Each extension will then use these parameters to decide the request's effective remote address. - // If an extension fails to detect the original IP address and isn't configured to reject - // the request, the HCM will try the remaining extensions until one succeeds or rejects - // the request. If the request isn't rejected nor any extension succeeds, the HCM will - // fallback to using the remote address. - // - // [#extension-category: envoy.http.original_ip_detection] - repeated config.core.v4alpha.TypedExtensionConfig original_ip_detection_extensions = 46; - - // Configures what network addresses are considered internal for stats and header sanitation - // purposes. If unspecified, only RFC1918 IP addresses will be considered internal. - // See the documentation for :ref:`config_http_conn_man_headers_x-envoy-internal` for more - // information about internal/external addresses. - InternalAddressConfig internal_address_config = 25; - - // If set, Envoy will not append the remote address to the - // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. This may be used in - // conjunction with HTTP filters that explicitly manipulate XFF after the HTTP connection manager - // has mutated the request headers. While :ref:`use_remote_address - // ` - // will also suppress XFF addition, it has consequences for logging and other - // Envoy uses of the remote address, so *skip_xff_append* should be used - // when only an elision of XFF addition is intended. - bool skip_xff_append = 21; - - // Via header value to append to request and response headers. 
If this is - // empty, no via header will be appended. - string via = 22 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Whether the connection manager will generate the :ref:`x-request-id - // ` header if it does not exist. This defaults to - // true. Generating a random UUID4 is expensive so in high throughput scenarios where this feature - // is not desired it can be disabled. - google.protobuf.BoolValue generate_request_id = 15; - - // Whether the connection manager will keep the :ref:`x-request-id - // ` header if passed for a request that is edge - // (Edge request is the request from external clients to front Envoy) and not reset it, which - // is the current Envoy behaviour. This defaults to false. - bool preserve_external_request_id = 32; - - // If set, Envoy will always set :ref:`x-request-id ` header in response. - // If this is false or not set, the request ID is returned in responses only if tracing is forced using - // :ref:`x-envoy-force-trace ` header. - bool always_set_request_id_in_response = 37; - - // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP - // header. - ForwardClientCertDetails forward_client_cert_details = 16 - [(validate.rules).enum = {defined_only: true}]; - - // This field is valid only when :ref:`forward_client_cert_details - // ` - // is APPEND_FORWARD or SANITIZE_SET and the client connection is mTLS. It specifies the fields in - // the client certificate to be forwarded. Note that in the - // :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header, *Hash* is always set, and - // *By* is always set when the client certificate presents the URI type Subject Alternative Name - // value. - SetCurrentClientCertDetails set_current_client_cert_details = 17; - - // If proxy_100_continue is true, Envoy will proxy incoming "Expect: - // 100-continue" headers upstream, and forward "100 Continue" responses - // downstream. 
If this is false or not set, Envoy will instead strip the - // "Expect: 100-continue" header, and send a "100 Continue" response itself. - bool proxy_100_continue = 18; - - // If - // :ref:`use_remote_address - // ` - // is true and represent_ipv4_remote_address_as_ipv4_mapped_ipv6 is true and the remote address is - // an IPv4 address, the address will be mapped to IPv6 before it is appended to *x-forwarded-for*. - // This is useful for testing compatibility of upstream services that parse the header value. For - // example, 50.0.0.1 is represented as ::FFFF:50.0.0.1. See `IPv4-Mapped IPv6 Addresses - // `_ for details. This will also affect the - // :ref:`config_http_conn_man_headers_x-envoy-external-address` header. See - // :ref:`http_connection_manager.represent_ipv4_remote_address_as_ipv4_mapped_ipv6 - // ` for runtime - // control. - // [#not-implemented-hide:] - bool represent_ipv4_remote_address_as_ipv4_mapped_ipv6 = 20; - - repeated UpgradeConfig upgrade_configs = 23; - - // Should paths be normalized according to RFC 3986 before any processing of - // requests by HTTP filters or routing? This affects the upstream *:path* header - // as well. For paths that fail this check, Envoy will respond with 400 to - // paths that are malformed. This defaults to false currently but will default - // true in the future. When not specified, this value may be overridden by the - // runtime variable - // :ref:`http_connection_manager.normalize_path`. - // See `Normalization and Comparison `_ - // for details of normalization. - // Note that Envoy does not perform - // `case normalization `_ - google.protobuf.BoolValue normalize_path = 30; - - // Determines if adjacent slashes in the path are merged into one before any processing of - // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without - // setting this option, incoming requests with path `//dir///file` will not match against route - // with `prefix` match set to `/dir`. 
Defaults to `false`. Note that slash merging is not part of - // `HTTP spec `_ and is provided for convenience. - bool merge_slashes = 33; - - // Action to take when request URL path contains escaped slash sequences (%2F, %2f, %5C and %5c). - // The default value can be overridden by the :ref:`http_connection_manager.path_with_escaped_slashes_action` - // runtime variable. - // The :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling` runtime - // variable can be used to apply the action to a portion of all requests. - PathWithEscapedSlashesAction path_with_escaped_slashes_action = 45; - - // The configuration of the request ID extension. This includes operations such as - // generation, validation, and associated tracing operations. If empty, the - // :ref:`UuidRequestIdConfig ` - // default extension is used with default parameters. See the documentation for that extension - // for details on what it does. Customizing the configuration for the default extension can be - // achieved by configuring it explicitly here. For example, to disable trace reason packing, - // the following configuration can be used: - // - // .. validated-code-block:: yaml - // :type-name: envoy.extensions.filters.network.http_connection_manager.v3.RequestIDExtension - // - // typed_config: - // "@type": type.googleapis.com/envoy.extensions.request_id.uuid.v3.UuidRequestIdConfig - // pack_trace_reason: false - // - // [#extension-category: envoy.request_id] - RequestIDExtension request_id_extension = 36; - - // The configuration to customize local reply returned by Envoy. It can customize status code, - // body text and response content type. If not specified, status code and text body are hard - // coded in Envoy, the response content type is plain text. - LocalReplyConfig local_reply_config = 38; - - oneof strip_port_mode { - // Determines if the port part should be removed from host/authority header before any processing - // of request by HTTP filters or routing. 
The port would be removed only if it is equal to the :ref:`listener's` - // local port. This affects the upstream host header unless the method is - // CONNECT in which case if no filter adds a port the original port will be restored before headers are - // sent upstream. - // Without setting this option, incoming requests with host `example:443` will not match against - // route with :ref:`domains` match set to `example`. Defaults to `false`. Note that port removal is not part - // of `HTTP spec `_ and is provided for convenience. - // Only one of `strip_matching_host_port` or `strip_any_host_port` can be set. - bool strip_matching_host_port = 39; - - // Determines if the port part should be removed from host/authority header before any processing - // of request by HTTP filters or routing. - // This affects the upstream host header unless the method is CONNECT in - // which case if no filter adds a port the original port will be restored before headers are sent upstream. - // Without setting this option, incoming requests with host `example:443` will not match against - // route with :ref:`domains` match set to `example`. Defaults to `false`. Note that port removal is not part - // of `HTTP spec `_ and is provided for convenience. - // Only one of `strip_matching_host_port` or `strip_any_host_port` can be set. - bool strip_any_host_port = 42; - } - - // Governs Envoy's behavior when receiving invalid HTTP from downstream. - // If this option is false (default), Envoy will err on the conservative side handling HTTP - // errors, terminating both HTTP/1.1 and HTTP/2 connections when receiving an invalid request. 
- // If this option is set to true, Envoy will be more permissive, only resetting the invalid - // stream in the case of HTTP/2 and leaving the connection open where possible (if the entire - // request is read for HTTP/1.1) - // In general this should be true for deployments receiving trusted traffic (L2 Envoys, - // company-internal mesh) and false when receiving untrusted traffic (edge deployments). - // - // If different behaviors for invalid_http_message for HTTP/1 and HTTP/2 are - // desired, one should use the new HTTP/1 option :ref:`override_stream_error_on_invalid_http_message - // ` or the new HTTP/2 option - // :ref:`override_stream_error_on_invalid_http_message - // ` - // *not* the deprecated but similarly named :ref:`stream_error_on_invalid_http_messaging - // ` - google.protobuf.BoolValue stream_error_on_invalid_http_message = 40; - - // [#not-implemented-hide:] Path normalization configuration. This includes - // configurations for transformations (e.g. RFC 3986 normalization or merge - // adjacent slashes) and the policy to apply them. The policy determines - // whether transformations affect the forwarded *:path* header. RFC 3986 path - // normalization is enabled by default and the default policy is that the - // normalized header will be forwarded. See :ref:`PathNormalizationOptions - // ` - // for details. - PathNormalizationOptions path_normalization_options = 43; - - // Determines if trailing dot of the host should be removed from host/authority header before any - // processing of request by HTTP filters or routing. - // This affects the upstream host header. - // Without setting this option, incoming requests with host `example.com.` will not match against - // route with :ref:`domains` match set to `example.com`. Defaults to `false`. 
- // When the incoming request contains a host/authority header that includes a port number, - // setting this option will strip a trailing dot, if present, from the host section, - // leaving the port as is (e.g. host value `example.com.:443` will be updated to `example.com:443`). - bool strip_trailing_host_dot = 47; -} - -// The configuration to customize local reply returned by Envoy. -message LocalReplyConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.LocalReplyConfig"; - - // Configuration of list of mappers which allows to filter and change local response. - // The mappers will be checked by the specified order until one is matched. - repeated ResponseMapper mappers = 1; - - // The configuration to form response body from the :ref:`command operators ` - // and to specify response content type as one of: plain/text or application/json. - // - // Example one: "plain/text" ``body_format``. - // - // .. validated-code-block:: yaml - // :type-name: envoy.config.core.v3.SubstitutionFormatString - // - // text_format: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" - // - // The following response body in "plain/text" format will be generated for a request with - // local reply body of "upstream connection error", response_code=503 and path=/foo. - // - // .. code-block:: text - // - // upstream connect error:503:path=/foo - // - // Example two: "application/json" ``body_format``. - // - // .. validated-code-block:: yaml - // :type-name: envoy.config.core.v3.SubstitutionFormatString - // - // json_format: - // status: "%RESPONSE_CODE%" - // message: "%LOCAL_REPLY_BODY%" - // path: "%REQ(:path)%" - // - // The following response body in "application/json" format would be generated for a request with - // local reply body of "upstream connection error", response_code=503 and path=/foo. - // - // .. 
code-block:: json - // - // { - // "status": 503, - // "message": "upstream connection error", - // "path": "/foo" - // } - // - config.core.v4alpha.SubstitutionFormatString body_format = 2; -} - -// The configuration to filter and change local response. -// [#next-free-field: 6] -message ResponseMapper { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper"; - - // Filter to determine if this mapper should apply. - config.accesslog.v4alpha.AccessLogFilter filter = 1 [(validate.rules).message = {required: true}]; - - // The new response status code if specified. - google.protobuf.UInt32Value status_code = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; - - // The new local reply body text if specified. It will be used in the `%LOCAL_REPLY_BODY%` - // command operator in the `body_format`. - config.core.v4alpha.DataSource body = 3; - - // A per mapper `body_format` to override the :ref:`body_format `. - // It will be used when this mapper is matched. - config.core.v4alpha.SubstitutionFormatString body_format_override = 4; - - // HTTP headers to add to a local reply. This allows the response mapper to append, to add - // or to override headers of any local reply before it is sent to a downstream client. - repeated config.core.v4alpha.HeaderValueOption headers_to_add = 5 - [(validate.rules).repeated = {max_items: 1000}]; -} - -message Rds { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.Rds"; - - // Configuration source specifier for RDS. - config.core.v4alpha.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; - - // The name of the route configuration. This name will be passed to the RDS - // API. This allows an Envoy configuration with multiple HTTP listeners (and - // associated HTTP connection manager filters) to use different route - // configurations. 
- string route_config_name = 2; -} - -// This message is used to work around the limitations with 'oneof' and repeated fields. -message ScopedRouteConfigurationsList { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.ScopedRouteConfigurationsList"; - - repeated config.route.v4alpha.ScopedRouteConfiguration scoped_route_configurations = 1 - [(validate.rules).repeated = {min_items: 1}]; -} - -// [#next-free-field: 6] -message ScopedRoutes { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes"; - - // Specifies the mechanism for constructing "scope keys" based on HTTP request attributes. These - // keys are matched against a set of :ref:`Key` - // objects assembled from :ref:`ScopedRouteConfiguration` - // messages distributed via SRDS (the Scoped Route Discovery Service) or assigned statically via - // :ref:`scoped_route_configurations_list`. - // - // Upon receiving a request's headers, the Router will build a key using the algorithm specified - // by this message. This key will be used to look up the routing table (i.e., the - // :ref:`RouteConfiguration`) to use for the request. - message ScopeKeyBuilder { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder"; - - // Specifies the mechanism for constructing key fragments which are composed into scope keys. - message FragmentBuilder { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes." - "ScopeKeyBuilder.FragmentBuilder"; - - // Specifies how the value of a header should be extracted. - // The following example maps the structure of a header to the fields in this message. - // - // .. 
code:: - // - // <0> <1> <-- index - // X-Header: a=b;c=d - // | || | - // | || \----> - // | || - // | |\----> - // | | - // | \----> - // | - // \----> - // - // Each 'a=b' key-value pair constitutes an 'element' of the header field. - message HeaderValueExtractor { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes." - "ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor"; - - // Specifies a header field's key value pair to match on. - message KvElement { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes." - "ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor.KvElement"; - - // The separator between key and value (e.g., '=' separates 'k=v;...'). - // If an element is an empty string, the element is ignored. - // If an element contains no separator, the whole element is parsed as key and the - // fragment value is an empty string. - // If there are multiple values for a matched key, the first value is returned. - string separator = 1 [(validate.rules).string = {min_len: 1}]; - - // The key to match on. - string key = 2 [(validate.rules).string = {min_len: 1}]; - } - - // The name of the header field to extract the value from. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // The element separator (e.g., ';' separates 'a;b;c;d'). - // Default: empty string. This causes the entirety of the header field to be extracted. - // If this field is set to an empty string and 'index' is used in the oneof below, 'index' - // must be set to 0. - string element_separator = 2; - - oneof extract_type { - // Specifies the zero based index of the element to extract. 
- // Note Envoy concatenates multiple values of the same header key into a comma separated - // string, the splitting always happens after the concatenation. - uint32 index = 3; - - // Specifies the key value pair to extract the value from. - KvElement element = 4; - } - } - - oneof type { - option (validate.required) = true; - - // Specifies how a header field's value should be extracted. - HeaderValueExtractor header_value_extractor = 1; - } - } - - // The final(built) scope key consists of the ordered union of these fragments, which are compared in order with the - // fragments of a :ref:`ScopedRouteConfiguration`. - // A missing fragment during comparison will make the key invalid, i.e., the computed key doesn't match any key. - repeated FragmentBuilder fragments = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - // The name assigned to the scoped routing configuration. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // The algorithm to use for constructing a scope key for each request. - ScopeKeyBuilder scope_key_builder = 2 [(validate.rules).message = {required: true}]; - - // Configuration source specifier for RDS. - // This config source is used to subscribe to RouteConfiguration resources specified in - // ScopedRouteConfiguration messages. - config.core.v4alpha.ConfigSource rds_config_source = 3 - [(validate.rules).message = {required: true}]; - - oneof config_specifier { - option (validate.required) = true; - - // The set of routing scopes corresponding to the HCM. A scope is assigned to a request by - // matching a key constructed from the request's attributes according to the algorithm specified - // by the - // :ref:`ScopeKeyBuilder` - // in this message. - ScopedRouteConfigurationsList scoped_route_configurations_list = 4; - - // The set of routing scopes associated with the HCM will be dynamically loaded via the SRDS - // API. 
A scope is assigned to a request by matching a key constructed from the request's - // attributes according to the algorithm specified by the - // :ref:`ScopeKeyBuilder` - // in this message. - ScopedRds scoped_rds = 5; - } -} - -message ScopedRds { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.ScopedRds"; - - // Configuration source specifier for scoped RDS. - config.core.v4alpha.ConfigSource scoped_rds_config_source = 1 - [(validate.rules).message = {required: true}]; - - // xdstp:// resource locator for scoped RDS collection. - // [#not-implemented-hide:] - string srds_resources_locator = 2; -} - -// [#next-free-field: 7] -message HttpFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter"; - - reserved 3, 2; - - reserved "config"; - - // The name of the filter configuration. The name is used as a fallback to - // select an extension if the type of the configuration proto is not - // sufficient. It also serves as a resource name in ExtensionConfigDS. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - oneof config_type { - // Filter specific configuration which depends on the filter being instantiated. See the supported - // filters for further documentation. - // - // To support configuring a :ref:`match tree `, use an - // :ref:`ExtensionWithMatcher ` - // with the desired HTTP filter. - // [#extension-category: envoy.filters.http] - google.protobuf.Any typed_config = 4; - - // Configuration source specifier for an extension configuration discovery service. - // In case of a failure and without the default configuration, the HTTP listener responds with code 500. - // Extension configs delivered through this mechanism are not expected to require warming (see https://github.com/envoyproxy/envoy/issues/12061). 
- // - // To support configuring a :ref:`match tree `, use an - // :ref:`ExtensionWithMatcher ` - // with the desired HTTP filter. This works for both the default filter configuration as well - // as for filters provided via the API. - config.core.v4alpha.ExtensionConfigSource config_discovery = 5; - } - - // If true, clients that do not support this filter may ignore the - // filter but otherwise accept the config. - // Otherwise, clients that do not support this filter must reject the config. - // This is also same with typed per filter config. - bool is_optional = 6; -} - -message RequestIDExtension { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.RequestIDExtension"; - - // Request ID extension specific configuration. - google.protobuf.Any typed_config = 1; -} - -// [#protodoc-title: Envoy Mobile HTTP connection manager] -// HTTP connection manager for use in Envoy mobile. -// [#extension: envoy.filters.network.envoy_mobile_http_connection_manager] -message EnvoyMobileHttpConnectionManager { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3." - "EnvoyMobileHttpConnectionManager"; - - // The configuration for the underlying HttpConnectionManager which will be - // instantiated for Envoy mobile. - HttpConnectionManager config = 1; -} diff --git a/api/envoy/extensions/filters/network/ratelimit/v4alpha/BUILD b/api/envoy/extensions/filters/network/ratelimit/v4alpha/BUILD deleted file mode 100644 index d9d0ca109526e..0000000000000 --- a/api/envoy/extensions/filters/network/ratelimit/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/ratelimit/v4alpha:pkg", - "//envoy/extensions/common/ratelimit/v3:pkg", - "//envoy/extensions/filters/network/ratelimit/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/filters/network/ratelimit/v4alpha/rate_limit.proto b/api/envoy/extensions/filters/network/ratelimit/v4alpha/rate_limit.proto deleted file mode 100644 index b53cb3bcc1d09..0000000000000 --- a/api/envoy/extensions/filters/network/ratelimit/v4alpha/rate_limit.proto +++ /dev/null @@ -1,53 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.ratelimit.v4alpha; - -import "envoy/config/ratelimit/v4alpha/rls.proto"; -import "envoy/extensions/common/ratelimit/v3/ratelimit.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.ratelimit.v4alpha"; -option java_outer_classname = "RateLimitProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Rate limit] -// Rate limit :ref:`configuration overview `. -// [#extension: envoy.filters.network.ratelimit] - -// [#next-free-field: 7] -message RateLimit { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.ratelimit.v3.RateLimit"; - - // The prefix to use when emitting :ref:`statistics `. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // The rate limit domain to use in the rate limit service request. - string domain = 2 [(validate.rules).string = {min_len: 1}]; - - // The rate limit descriptor list to use in the rate limit service request. 
- repeated common.ratelimit.v3.RateLimitDescriptor descriptors = 3 - [(validate.rules).repeated = {min_items: 1}]; - - // The timeout in milliseconds for the rate limit service RPC. If not - // set, this defaults to 20ms. - google.protobuf.Duration timeout = 4; - - // The filter's behaviour in case the rate limiting service does - // not respond back. When it is set to true, Envoy will not allow traffic in case of - // communication failure between rate limiting service and the proxy. - // Defaults to false. - bool failure_mode_deny = 5; - - // Configuration for an external rate limit service provider. If not - // specified, any calls to the rate limit service will immediately return - // success. - config.ratelimit.v4alpha.RateLimitServiceConfig rate_limit_service = 6 - [(validate.rules).message = {required: true}]; -} diff --git a/api/envoy/extensions/filters/network/rbac/v4alpha/BUILD b/api/envoy/extensions/filters/network/rbac/v4alpha/BUILD deleted file mode 100644 index 27418dd3299e4..0000000000000 --- a/api/envoy/extensions/filters/network/rbac/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/rbac/v4alpha:pkg", - "//envoy/extensions/filters/network/rbac/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/filters/network/rbac/v4alpha/rbac.proto b/api/envoy/extensions/filters/network/rbac/v4alpha/rbac.proto deleted file mode 100644 index 3512bae2d2aba..0000000000000 --- a/api/envoy/extensions/filters/network/rbac/v4alpha/rbac.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.rbac.v4alpha; - -import "envoy/config/rbac/v4alpha/rbac.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.rbac.v4alpha"; -option java_outer_classname = "RbacProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: RBAC] -// Role-Based Access Control :ref:`configuration overview `. -// [#extension: envoy.filters.network.rbac] - -// RBAC network filter config. -// -// Header should not be used in rules/shadow_rules in RBAC network filter as -// this information is only available in :ref:`RBAC http filter `. -// [#next-free-field: 6] -message RBAC { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.rbac.v3.RBAC"; - - enum EnforcementType { - // Apply RBAC policies when the first byte of data arrives on the connection. - ONE_TIME_ON_FIRST_BYTE = 0; - - // Continuously apply RBAC policies as data arrives. Use this mode when - // using RBAC with message oriented protocols such as Mongo, MySQL, Kafka, - // etc. when the protocol decoders emit dynamic metadata such as the - // resources being accessed and the operations on the resources. 
- CONTINUOUS = 1; - } - - // Specify the RBAC rules to be applied globally. - // If absent, no enforcing RBAC policy will be applied. - // If present and empty, DENY. - config.rbac.v4alpha.RBAC rules = 1; - - // Shadow rules are not enforced by the filter but will emit stats and logs - // and can be used for rule testing. - // If absent, no shadow RBAC policy will be applied. - config.rbac.v4alpha.RBAC shadow_rules = 2; - - // If specified, shadow rules will emit stats with the given prefix. - // This is useful to distinguish the stat when there are more than 1 RBAC filter configured with - // shadow rules. - string shadow_rules_stat_prefix = 5; - - // The prefix to use when emitting statistics. - string stat_prefix = 3 [(validate.rules).string = {min_len: 1}]; - - // RBAC enforcement strategy. By default RBAC will be enforced only once - // when the first byte of data arrives from the downstream. When used in - // conjunction with filters that emit dynamic metadata after decoding - // every payload (e.g., Mongo, MySQL, Kafka) set the enforcement type to - // CONTINUOUS to enforce RBAC policies on every message boundary. - EnforcementType enforcement_type = 4; -} diff --git a/api/envoy/extensions/filters/network/rocketmq_proxy/v3/README.md b/api/envoy/extensions/filters/network/rocketmq_proxy/v3/README.md deleted file mode 100644 index c6fb05d180635..0000000000000 --- a/api/envoy/extensions/filters/network/rocketmq_proxy/v3/README.md +++ /dev/null @@ -1 +0,0 @@ -Protocol buffer definitions for the Rocketmq proxy. diff --git a/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/BUILD b/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/BUILD deleted file mode 100644 index 06009f5f397fa..0000000000000 --- a/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/route/v4alpha:pkg", - "//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg", - "//envoy/type/matcher/v4alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/rocketmq_proxy.proto b/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/rocketmq_proxy.proto deleted file mode 100644 index 45a71da2f8dd3..0000000000000 --- a/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/rocketmq_proxy.proto +++ /dev/null @@ -1,38 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.rocketmq_proxy.v4alpha; - -import "envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v4alpha"; -option java_outer_classname = "RocketmqProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: RocketMQ Proxy] -// RocketMQ Proxy :ref:`configuration overview `. -// [#extension: envoy.filters.network.rocketmq_proxy] - -message RocketmqProxy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.rocketmq_proxy.v3.RocketmqProxy"; - - // The human readable prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // The route table for the connection manager is specified in this property. - RouteConfiguration route_config = 2; - - // The largest duration transient object expected to live, more than 10s is recommended. 
- google.protobuf.Duration transient_object_life_span = 3; - - // If develop_mode is enabled, this proxy plugin may work without dedicated traffic intercepting - // facility without considering backward compatibility of exiting RocketMQ client SDK. - bool develop_mode = 4; -} diff --git a/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto b/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto deleted file mode 100644 index 0925afef833d4..0000000000000 --- a/api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto +++ /dev/null @@ -1,67 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.rocketmq_proxy.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/route/v4alpha/route_components.proto"; -import "envoy/type/matcher/v4alpha/string.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v4alpha"; -option java_outer_classname = "RouteProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Rocketmq Proxy Route Configuration] -// Rocketmq Proxy :ref:`configuration overview `. - -message RouteConfiguration { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.rocketmq_proxy.v3.RouteConfiguration"; - - // The name of the route configuration. - string name = 1; - - // The list of routes that will be matched, in order, against incoming requests. The first route - // that matches will be used. - repeated Route routes = 2; -} - -message Route { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.rocketmq_proxy.v3.Route"; - - // Route matching parameters. 
- RouteMatch match = 1 [(validate.rules).message = {required: true}]; - - // Route request to some upstream cluster. - RouteAction route = 2 [(validate.rules).message = {required: true}]; -} - -message RouteMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.rocketmq_proxy.v3.RouteMatch"; - - // The name of the topic. - type.matcher.v4alpha.StringMatcher topic = 1 [(validate.rules).message = {required: true}]; - - // Specifies a set of headers that the route should match on. The router will check the request’s - // headers against all the specified headers in the route config. A match will happen if all the - // headers in the route are present in the request with the same values (or based on presence if - // the value field is not in the config). - repeated config.route.v4alpha.HeaderMatcher headers = 2; -} - -message RouteAction { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.rocketmq_proxy.v3.RouteAction"; - - // Indicates the upstream cluster to which the request should be routed. - string cluster = 1 [(validate.rules).string = {min_len: 1}]; - - // Optional endpoint metadata match criteria used by the subset load balancer. - config.core.v4alpha.Metadata metadata_match = 2; -} diff --git a/api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v4alpha/BUILD b/api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v4alpha/BUILD deleted file mode 100644 index 465ea4ff28449..0000000000000 --- a/api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/common/dynamic_forward_proxy/v4alpha:pkg", - "//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v4alpha/sni_dynamic_forward_proxy.proto b/api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v4alpha/sni_dynamic_forward_proxy.proto deleted file mode 100644 index de2947fcba9ec..0000000000000 --- a/api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v4alpha/sni_dynamic_forward_proxy.proto +++ /dev/null @@ -1,40 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.sni_dynamic_forward_proxy.v4alpha; - -import "envoy/extensions/common/dynamic_forward_proxy/v4alpha/dns_cache.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.sni_dynamic_forward_proxy.v4alpha"; -option java_outer_classname = "SniDynamicForwardProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: SNI dynamic forward proxy] - -// Configuration for the SNI-based dynamic forward proxy filter. See the -// :ref:`architecture overview ` for -// more information. Note this filter must be configured along with -// :ref:`TLS inspector listener filter ` -// to work. 
-// [#extension: envoy.filters.network.sni_dynamic_forward_proxy] -message FilterConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3alpha.FilterConfig"; - - // The DNS cache configuration that the filter will attach to. Note this - // configuration must match that of associated :ref:`dynamic forward proxy - // cluster configuration - // `. - common.dynamic_forward_proxy.v4alpha.DnsCacheConfig dns_cache_config = 1 - [(validate.rules).message = {required: true}]; - - oneof port_specifier { - // The port number to connect to the upstream. - uint32 port_value = 2 [(validate.rules).uint32 = {lte: 65535 gt: 0}]; - } -} diff --git a/api/envoy/extensions/filters/network/tcp_proxy/v4alpha/BUILD b/api/envoy/extensions/filters/network/tcp_proxy/v4alpha/BUILD deleted file mode 100644 index 1b359dc7be526..0000000000000 --- a/api/envoy/extensions/filters/network/tcp_proxy/v4alpha/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/accesslog/v4alpha:pkg", - "//envoy/config/core/v4alpha:pkg", - "//envoy/extensions/filters/network/tcp_proxy/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/filters/network/tcp_proxy/v4alpha/tcp_proxy.proto b/api/envoy/extensions/filters/network/tcp_proxy/v4alpha/tcp_proxy.proto deleted file mode 100644 index 95f2c26c888ca..0000000000000 --- a/api/envoy/extensions/filters/network/tcp_proxy/v4alpha/tcp_proxy.proto +++ /dev/null @@ -1,154 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.tcp_proxy.v4alpha; - -import "envoy/config/accesslog/v4alpha/accesslog.proto"; -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/type/v3/hash_policy.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.tcp_proxy.v4alpha"; -option java_outer_classname = "TcpProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: TCP Proxy] -// TCP Proxy :ref:`configuration overview `. -// [#extension: envoy.filters.network.tcp_proxy] - -// [#next-free-field: 14] -message TcpProxy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy"; - - // Allows for specification of multiple upstream clusters along with weights - // that indicate the percentage of traffic to be forwarded to each cluster. - // The router selects an upstream cluster based on these weights. 
- message WeightedCluster { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy.WeightedCluster"; - - message ClusterWeight { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy.WeightedCluster.ClusterWeight"; - - // Name of the upstream cluster. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // When a request matches the route, the choice of an upstream cluster is - // determined by its weight. The sum of weights across all entries in the - // clusters array determines the total weight. - uint32 weight = 2 [(validate.rules).uint32 = {gte: 1}]; - - // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints - // in the upstream cluster with metadata matching what is set in this field will be considered - // for load balancing. Note that this will be merged with what's provided in - // :ref:`TcpProxy.metadata_match - // `, with values - // here taking precedence. The filter name should be specified as *envoy.lb*. - config.core.v4alpha.Metadata metadata_match = 3; - } - - // Specifies one or more upstream clusters associated with the route. - repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - // Configuration for tunneling TCP over other transports or application layers. - // Tunneling is supported over both HTTP/1.1 and HTTP/2. Upstream protocol is - // determined by the cluster configuration. - message TunnelingConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy.TunnelingConfig"; - - // The hostname to send in the synthesized CONNECT headers to the upstream proxy. - string hostname = 1 [(validate.rules).string = {min_len: 1}]; - - // Use POST method instead of CONNECT method to tunnel the TCP stream. 
- // The 'protocol: bytestream' header is also NOT set for HTTP/2 to comply with the spec. - // - // The upstream proxy is expected to convert POST payload as raw TCP. - bool use_post = 2; - - // Additional request headers to upstream proxy. This is mainly used to - // trigger upstream to convert POST requests back to CONNECT requests. - // - // Neither *:-prefixed* pseudo-headers nor the Host: header can be overridden. - repeated config.core.v4alpha.HeaderValueOption headers_to_add = 3 - [(validate.rules).repeated = {max_items: 1000}]; - } - - reserved 6; - - reserved "deprecated_v1"; - - // The prefix to use when emitting :ref:`statistics - // `. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - oneof cluster_specifier { - option (validate.required) = true; - - // The upstream cluster to connect to. - string cluster = 2; - - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. - WeightedCluster weighted_clusters = 10; - } - - // Optional endpoint metadata match criteria. Only endpoints in the upstream - // cluster with metadata matching that set in metadata_match will be - // considered. The filter name should be specified as *envoy.lb*. - config.core.v4alpha.Metadata metadata_match = 9; - - // The idle timeout for connections managed by the TCP proxy filter. The idle timeout - // is defined as the period in which there are no bytes sent or received on either - // the upstream or downstream connection. If not set, the default idle timeout is 1 hour. If set - // to 0s, the timeout will be disabled. - // - // .. warning:: - // Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP - // FIN packets, etc. - google.protobuf.Duration idle_timeout = 8; - - // [#not-implemented-hide:] The idle timeout for connections managed by the TCP proxy - // filter. 
The idle timeout is defined as the period in which there is no - // active traffic. If not set, there is no idle timeout. When the idle timeout - // is reached the connection will be closed. The distinction between - // downstream_idle_timeout/upstream_idle_timeout provides a means to set - // timeout based on the last byte sent on the downstream/upstream connection. - google.protobuf.Duration downstream_idle_timeout = 3; - - // [#not-implemented-hide:] - google.protobuf.Duration upstream_idle_timeout = 4; - - // Configuration for :ref:`access logs ` - // emitted by the this tcp_proxy. - repeated config.accesslog.v4alpha.AccessLog access_log = 5; - - // The maximum number of unsuccessful connection attempts that will be made before - // giving up. If the parameter is not specified, 1 connection attempt will be made. - google.protobuf.UInt32Value max_connect_attempts = 7 [(validate.rules).uint32 = {gte: 1}]; - - // Optional configuration for TCP proxy hash policy. If hash_policy is not set, the hash-based - // load balancing algorithms will select a host randomly. Currently the number of hash policies is - // limited to 1. - repeated type.v3.HashPolicy hash_policy = 11 [(validate.rules).repeated = {max_items: 1}]; - - // If set, this configures tunneling, e.g. configuration options to tunnel TCP payload over - // HTTP CONNECT. If this message is absent, the payload will be proxied upstream as per usual. - TunnelingConfig tunneling_config = 12; - - // The maximum duration of a connection. The duration is defined as the period since a connection - // was established. If not set, there is no max duration. When max_downstream_connection_duration - // is reached the connection will be closed. Duration must be at least 1ms. 
- google.protobuf.Duration max_downstream_connection_duration = 13 - [(validate.rules).duration = {gte {nanos: 1000000}}]; -} diff --git a/api/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v4alpha/BUILD b/api/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v4alpha/BUILD deleted file mode 100644 index a58bc9ebda546..0000000000000 --- a/api/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/ratelimit/v4alpha:pkg", - "//envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v4alpha/rate_limit.proto b/api/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v4alpha/rate_limit.proto deleted file mode 100644 index ed2a33290268e..0000000000000 --- a/api/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v4alpha/rate_limit.proto +++ /dev/null @@ -1,56 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.thrift_proxy.filters.ratelimit.v4alpha; - -import "envoy/config/ratelimit/v4alpha/rls.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.filters.ratelimit.v4alpha"; -option java_outer_classname = "RateLimitProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Rate limit] -// Rate limit :ref:`configuration overview `. 
-// [#extension: envoy.filters.thrift.ratelimit] - -// [#next-free-field: 6] -message RateLimit { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.thrift_proxy.filters.ratelimit.v3.RateLimit"; - - // The rate limit domain to use in the rate limit service request. - string domain = 1 [(validate.rules).string = {min_len: 1}]; - - // Specifies the rate limit configuration stage. Each configured rate limit filter performs a - // rate limit check using descriptors configured in the - // :ref:`envoy_v3_api_msg_extensions.filters.network.thrift_proxy.v3.RouteAction` for the request. - // Only those entries with a matching stage number are used for a given filter. If not set, the - // default stage number is 0. - // - // .. note:: - // - // The filter supports a range of 0 - 10 inclusively for stage numbers. - uint32 stage = 2 [(validate.rules).uint32 = {lte: 10}]; - - // The timeout in milliseconds for the rate limit service RPC. If not - // set, this defaults to 20ms. - google.protobuf.Duration timeout = 3; - - // The filter's behaviour in case the rate limiting service does - // not respond back. When it is set to true, Envoy will not allow traffic in case of - // communication failure between rate limiting service and the proxy. - // Defaults to false. - bool failure_mode_deny = 4; - - // Configuration for an external rate limit service provider. If not - // specified, any calls to the rate limit service will immediately return - // success. 
- config.ratelimit.v4alpha.RateLimitServiceConfig rate_limit_service = 5 - [(validate.rules).message = {required: true}]; -} diff --git a/api/envoy/extensions/filters/network/thrift_proxy/v3/route.proto b/api/envoy/extensions/filters/network/thrift_proxy/v3/route.proto index cf4c06ae1f19e..b79c9bc9619ea 100644 --- a/api/envoy/extensions/filters/network/thrift_proxy/v3/route.proto +++ b/api/envoy/extensions/filters/network/thrift_proxy/v3/route.proto @@ -81,11 +81,33 @@ message RouteMatch { repeated config.route.v3.HeaderMatcher headers = 4; } -// [#next-free-field: 7] +// [#next-free-field: 8] message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.thrift_proxy.v2alpha1.RouteAction"; + // The router is capable of shadowing traffic from one cluster to another. The current + // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to + // respond before returning the response from the primary cluster. All normal statistics are + // collected for the shadow cluster making this feature useful for testing. + // + // .. note:: + // + // Shadowing will not be triggered if the primary cluster does not exist. + message RequestMirrorPolicy { + // Specifies the cluster that requests will be mirrored to. The cluster must + // exist in the cluster manager configuration when the route configuration is loaded. + // If it disappears at runtime, the shadow request will silently be ignored. + string cluster = 1 [(validate.rules).string = {min_len: 1}]; + + // If not specified, all requests to the target cluster will be mirrored. + // + // For some fraction N/D, a random number in the range [0,D) is selected. If the + // number is <= the value of the numerator N, or if the key is not present, the default + // value, the request will be mirrored. 
+ config.core.v3.RuntimeFractionalPercent runtime_fraction = 2; + } + oneof cluster_specifier { option (validate.required) = true; @@ -123,6 +145,9 @@ message RouteAction { // Strip the service prefix from the method name, if there's a prefix. For // example, the method call Service:method would end up being just method. bool strip_service_name = 5; + + // Indicates that the route has request mirroring policies. + repeated RequestMirrorPolicy request_mirror_policies = 7; } // Allows for specification of multiple upstream clusters along with weights that indicate the diff --git a/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/BUILD b/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/BUILD deleted file mode 100644 index 995c04093a7da..0000000000000 --- a/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/route/v4alpha:pkg", - "//envoy/extensions/filters/network/thrift_proxy/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto b/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto deleted file mode 100644 index e638e9b8a2be8..0000000000000 --- a/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto +++ /dev/null @@ -1,158 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.thrift_proxy.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/route/v4alpha/route_components.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = 
"io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.v4alpha"; -option java_outer_classname = "RouteProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Thrift Proxy Route Configuration] -// Thrift Proxy :ref:`configuration overview `. - -message RouteConfiguration { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.thrift_proxy.v3.RouteConfiguration"; - - // The name of the route configuration. Reserved for future use in asynchronous route discovery. - string name = 1; - - // The list of routes that will be matched, in order, against incoming requests. The first route - // that matches will be used. - repeated Route routes = 2; -} - -message Route { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.thrift_proxy.v3.Route"; - - // Route matching parameters. - RouteMatch match = 1 [(validate.rules).message = {required: true}]; - - // Route request to some upstream cluster. - RouteAction route = 2 [(validate.rules).message = {required: true}]; -} - -message RouteMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.thrift_proxy.v3.RouteMatch"; - - oneof match_specifier { - option (validate.required) = true; - - // If specified, the route must exactly match the request method name. As a special case, an - // empty string matches any request method name. - string method_name = 1; - - // If specified, the route must have the service name as the request method name prefix. As a - // special case, an empty string matches any service name. Only relevant when service - // multiplexing. - string service_name = 2; - } - - // Inverts whatever matching is done in the :ref:`method_name - // ` or - // :ref:`service_name - // ` fields. 
- // Cannot be combined with wildcard matching as that would result in routes never being matched. - // - // .. note:: - // - // This does not invert matching done as part of the :ref:`headers field - // ` field. To - // invert header matching, see :ref:`invert_match - // `. - bool invert = 3; - - // Specifies a set of headers that the route should match on. The router will check the request’s - // headers against all the specified headers in the route config. A match will happen if all the - // headers in the route are present in the request with the same values (or based on presence if - // the value field is not in the config). Note that this only applies for Thrift transports and/or - // protocols that support headers. - repeated config.route.v4alpha.HeaderMatcher headers = 4; -} - -// [#next-free-field: 7] -message RouteAction { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.thrift_proxy.v3.RouteAction"; - - oneof cluster_specifier { - option (validate.required) = true; - - // Indicates a single upstream cluster to which the request should be routed - // to. - string cluster = 1 [(validate.rules).string = {min_len: 1}]; - - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. - WeightedCluster weighted_clusters = 2; - - // Envoy will determine the cluster to route to by reading the value of the - // Thrift header named by cluster_header from the request headers. If the - // header is not found or the referenced cluster does not exist Envoy will - // respond with an unknown method exception or an internal error exception, - // respectively. - string cluster_header = 6 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; - } - - // Optional endpoint metadata match criteria used by the subset load balancer. 
Only endpoints in - // the upstream cluster with metadata matching what is set in this field will be considered. - // Note that this will be merged with what's provided in :ref:`WeightedCluster.metadata_match - // `, - // with values there taking precedence. Keys and values should be provided under the "envoy.lb" - // metadata key. - config.core.v4alpha.Metadata metadata_match = 3; - - // Specifies a set of rate limit configurations that could be applied to the route. - // N.B. Thrift service or method name matching can be achieved by specifying a RequestHeaders - // action with the header name ":method-name". - repeated config.route.v4alpha.RateLimit rate_limits = 4; - - // Strip the service prefix from the method name, if there's a prefix. For - // example, the method call Service:method would end up being just method. - bool strip_service_name = 5; -} - -// Allows for specification of multiple upstream clusters along with weights that indicate the -// percentage of traffic to be forwarded to each cluster. The router selects an upstream cluster -// based on these weights. -message WeightedCluster { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.thrift_proxy.v3.WeightedCluster"; - - message ClusterWeight { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.thrift_proxy.v3.WeightedCluster.ClusterWeight"; - - // Name of the upstream cluster. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // When a request matches the route, the choice of an upstream cluster is determined by its - // weight. The sum of weights across all entries in the clusters array determines the total - // weight. - google.protobuf.UInt32Value weight = 2 [(validate.rules).uint32 = {gte: 1}]; - - // Optional endpoint metadata match criteria used by the subset load balancer. 
Only endpoints in - // the upstream cluster with metadata matching what is set in this field, combined with what's - // provided in :ref:`RouteAction's metadata_match - // `, - // will be considered. Values here will take precedence. Keys and values should be provided - // under the "envoy.lb" metadata key. - config.core.v4alpha.Metadata metadata_match = 3; - } - - // Specifies one or more upstream clusters associated with the route. - repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; -} diff --git a/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/thrift_proxy.proto b/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/thrift_proxy.proto deleted file mode 100644 index de399582869a0..0000000000000 --- a/api/envoy/extensions/filters/network/thrift_proxy/v4alpha/thrift_proxy.proto +++ /dev/null @@ -1,140 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.thrift_proxy.v4alpha; - -import "envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.v4alpha"; -option java_outer_classname = "ThriftProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Thrift Proxy] -// Thrift Proxy :ref:`configuration overview `. -// [#extension: envoy.filters.network.thrift_proxy] - -// Thrift transport types supported by Envoy. -enum TransportType { - // For downstream connections, the Thrift proxy will attempt to determine which transport to use. - // For upstream connections, the Thrift proxy will use same transport as the downstream - // connection. 
- AUTO_TRANSPORT = 0; - - // The Thrift proxy will use the Thrift framed transport. - FRAMED = 1; - - // The Thrift proxy will use the Thrift unframed transport. - UNFRAMED = 2; - - // The Thrift proxy will assume the client is using the Thrift header transport. - HEADER = 3; -} - -// Thrift Protocol types supported by Envoy. -enum ProtocolType { - // For downstream connections, the Thrift proxy will attempt to determine which protocol to use. - // Note that the older, non-strict (or lax) binary protocol is not included in automatic protocol - // detection. For upstream connections, the Thrift proxy will use the same protocol as the - // downstream connection. - AUTO_PROTOCOL = 0; - - // The Thrift proxy will use the Thrift binary protocol. - BINARY = 1; - - // The Thrift proxy will use Thrift non-strict binary protocol. - LAX_BINARY = 2; - - // The Thrift proxy will use the Thrift compact protocol. - COMPACT = 3; - - // The Thrift proxy will use the Thrift "Twitter" protocol implemented by the finagle library. - TWITTER = 4; -} - -// [#next-free-field: 8] -message ThriftProxy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.thrift_proxy.v3.ThriftProxy"; - - // Supplies the type of transport that the Thrift proxy should use. Defaults to - // :ref:`AUTO_TRANSPORT`. - TransportType transport = 2 [(validate.rules).enum = {defined_only: true}]; - - // Supplies the type of protocol that the Thrift proxy should use. Defaults to - // :ref:`AUTO_PROTOCOL`. - ProtocolType protocol = 3 [(validate.rules).enum = {defined_only: true}]; - - // The human readable prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // The route table for the connection manager is static and is specified in this property. - RouteConfiguration route_config = 4; - - // A list of individual Thrift filters that make up the filter chain for requests made to the - // Thrift proxy. 
Order matters as the filters are processed sequentially. For backwards - // compatibility, if no thrift_filters are specified, a default Thrift router filter - // (`envoy.filters.thrift.router`) is used. - // [#extension-category: envoy.thrift_proxy.filters] - repeated ThriftFilter thrift_filters = 5; - - // If set to true, Envoy will try to skip decode data after metadata in the Thrift message. - // This mode will only work if the upstream and downstream protocols are the same and the transport - // is the same, the transport type is framed and the protocol is not Twitter. Otherwise Envoy will - // fallback to decode the data. - bool payload_passthrough = 6; - - // Optional maximum requests for a single downstream connection. If not specified, there is no limit. - google.protobuf.UInt32Value max_requests_per_connection = 7; -} - -// ThriftFilter configures a Thrift filter. -message ThriftFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.thrift_proxy.v3.ThriftFilter"; - - reserved 2; - - reserved "config"; - - // The name of the filter to instantiate. The name must match a supported - // filter. The built-in filters are: - // - // [#comment:TODO(zuercher): Auto generate the following list] - // * :ref:`envoy.filters.thrift.router ` - // * :ref:`envoy.filters.thrift.rate_limit ` - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Filter specific configuration which depends on the filter being instantiated. See the supported - // filters for further documentation. - oneof config_type { - google.protobuf.Any typed_config = 3; - } -} - -// ThriftProtocolOptions specifies Thrift upstream protocol options. This object is used in -// in -// :ref:`typed_extension_protocol_options`, -// keyed by the name `envoy.filters.network.thrift_proxy`. 
-message ThriftProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.thrift_proxy.v3.ThriftProtocolOptions"; - - // Supplies the type of transport that the Thrift proxy should use for upstream connections. - // Selecting - // :ref:`AUTO_TRANSPORT`, - // which is the default, causes the proxy to use the same transport as the downstream connection. - TransportType transport = 1 [(validate.rules).enum = {defined_only: true}]; - - // Supplies the type of protocol that the Thrift proxy should use for upstream connections. - // Selecting - // :ref:`AUTO_PROTOCOL`, - // which is the default, causes the proxy to use the same protocol as the downstream connection. - ProtocolType protocol = 2 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/api/envoy/extensions/filters/udp/dns_filter/v4alpha/BUILD b/api/envoy/extensions/filters/udp/dns_filter/v4alpha/BUILD deleted file mode 100644 index 28c2427c4a495..0000000000000 --- a/api/envoy/extensions/filters/udp/dns_filter/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/data/dns/v4alpha:pkg", - "//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto b/api/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto deleted file mode 100644 index 6957e58dbb068..0000000000000 --- a/api/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto +++ /dev/null @@ -1,84 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.udp.dns_filter.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/resolver.proto"; -import "envoy/data/dns/v4alpha/dns_table.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.udp.dns_filter.v4alpha"; -option java_outer_classname = "DnsFilterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: DNS Filter] -// DNS Filter :ref:`configuration overview `. -// [#extension: envoy.filters.udp_listener.dns_filter] - -// Configuration for the DNS filter. -message DnsFilterConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig"; - - // This message contains the configuration for the DNS Filter operating - // in a server context. 
This message will contain the virtual hosts and - // associated addresses with which Envoy will respond to queries - message ServerContextConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig.ServerContextConfig"; - - oneof config_source { - option (validate.required) = true; - - // Load the configuration specified from the control plane - data.dns.v4alpha.DnsTable inline_dns_table = 1; - - // Seed the filter configuration from an external path. This source - // is a yaml formatted file that contains the DnsTable driving Envoy's - // responses to DNS queries - config.core.v4alpha.DataSource external_dns_table = 2; - } - } - - // This message contains the configuration for the DNS Filter operating - // in a client context. This message will contain the timeouts, retry, - // and forwarding configuration for Envoy to make DNS requests to other - // resolvers - message ClientContextConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig.ClientContextConfig"; - - // Sets the maximum time we will wait for the upstream query to complete - // We allow 5s for the upstream resolution to complete, so the minimum - // value here is 1. Note that the total latency for a failed query is the - // number of retries multiplied by the resolver_timeout. - google.protobuf.Duration resolver_timeout = 1 [(validate.rules).duration = {gte {seconds: 1}}]; - - // DNS resolution configuration which includes the underlying dns resolver addresses and options. - config.core.v4alpha.DnsResolutionConfig dns_resolution_config = 2; - - // Controls how many outstanding external lookup contexts the filter tracks. 
- // The context structure allows the filter to respond to every query even if the external - // resolution times out or is otherwise unsuccessful - uint64 max_pending_lookups = 3 [(validate.rules).uint64 = {gte: 1}]; - } - - // The stat prefix used when emitting DNS filter statistics - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // Server context configuration contains the data that the filter uses to respond - // to DNS requests. - ServerContextConfig server_config = 2; - - // Client context configuration controls Envoy's behavior when it must use external - // resolvers to answer a query. This object is optional and if omitted instructs - // the filter to resolve queries from the data in the server_config - ClientContextConfig client_config = 3; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/squash/v3/BUILD b/api/envoy/extensions/formatter/metadata/v3/BUILD similarity index 100% rename from generated_api_shadow/envoy/extensions/filters/http/squash/v3/BUILD rename to api/envoy/extensions/formatter/metadata/v3/BUILD diff --git a/api/envoy/extensions/formatter/metadata/v3/metadata.proto b/api/envoy/extensions/formatter/metadata/v3/metadata.proto new file mode 100644 index 0000000000000..9b110a4893812 --- /dev/null +++ b/api/envoy/extensions/formatter/metadata/v3/metadata.proto @@ -0,0 +1,56 @@ +syntax = "proto3"; + +package envoy.extensions.formatter.metadata.v3; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.formatter.metadata.v3"; +option java_outer_classname = "MetadataProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Formatter extension for printing various types of metadata] +// [#extension: envoy.formatter.metadata] + +// Metadata formatter extension implements METADATA command operator that +// prints all types of metadata. 
The first parameter taken by METADATA operator defines +// type of metadata. The following types of metadata are supported (case sensitive): +// +// * DYNAMIC +// * CLUSTER +// * ROUTE +// +// See :ref:`here ` for more information on access log configuration. + +// %METADATA(TYPE:NAMESPACE:KEY):Z% +// :ref:`Metadata ` info, +// where TYPE is type of metadata (see above for supported types), +// NAMESPACE is the filter namespace used when setting the metadata, KEY is an optional +// lookup key in the namespace with the option of specifying nested keys separated by ':', +// and Z is an optional parameter denoting string truncation up to Z characters long. +// The data will be logged as a JSON string. For example, for the following ROUTE metadata: +// +// ``com.test.my_filter: {"test_key": "foo", "test_object": {"inner_key": "bar"}}`` +// +// * %METADATA(ROUTE:com.test.my_filter)% will log: ``{"test_key": "foo", "test_object": {"inner_key": "bar"}}`` +// * %METADATA(ROUTE:com.test.my_filter:test_key)% will log: ``foo`` +// * %METADATA(ROUTE:com.test.my_filter:test_object)% will log: ``{"inner_key": "bar"}`` +// * %METADATA(ROUTE:com.test.my_filter:test_object:inner_key)% will log: ``bar`` +// * %METADATA(ROUTE:com.unknown_filter)% will log: ``-`` +// * %METADATA(ROUTE:com.test.my_filter:unknown_key)% will log: ``-`` +// * %METADATA(ROUTE:com.test.my_filter):25% will log (truncation at 25 characters): ``{"test_key": "foo", "test`` +// +// .. note:: +// +// For typed JSON logs, this operator renders a single value with string, numeric, or boolean type +// when the referenced key is a simple value. If the referenced key is a struct or list value, a +// JSON struct or list is rendered. Structs and lists may be nested. In any event, the maximum +// length is ignored. +// +// .. 
note:: +// +// METADATA(DYNAMIC:NAMESPACE:KEY):Z is equivalent to :ref:`DYNAMIC_METADATA(NAMESPACE:KEY):Z` +// METADATA(CLUSTER:NAMESPACE:KEY):Z is equivalent to :ref:`CLUSTER_METADATA(NAMESPACE:KEY):Z` + +message Metadata { +} diff --git a/generated_api_shadow/envoy/extensions/filters/network/kafka_broker/v3/BUILD b/api/envoy/extensions/key_value/file_based/v3/BUILD similarity index 100% rename from generated_api_shadow/envoy/extensions/filters/network/kafka_broker/v3/BUILD rename to api/envoy/extensions/key_value/file_based/v3/BUILD diff --git a/api/envoy/extensions/key_value/file_based/v3/config.proto b/api/envoy/extensions/key_value/file_based/v3/config.proto new file mode 100644 index 0000000000000..0eff4feb8f941 --- /dev/null +++ b/api/envoy/extensions/key_value/file_based/v3/config.proto @@ -0,0 +1,27 @@ +syntax = "proto3"; + +package envoy.extensions.key_value.file_based.v3; + +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.key_value.file_based.v3"; +option java_outer_classname = "ConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: File Based Key Value Store storage plugin] + +// [#alpha:] +// [#extension: envoy.key_value.file_based] +// This is configuration to flush a key value store out to disk. +message FileBasedKeyValueStoreConfig { + // The filename to read the keys and values from, and write the keys and + // values to. + string filename = 1 [(validate.rules).string = {min_len: 1}]; + + // The interval at which the key value store should be flushed to the file. 
+ google.protobuf.Duration flush_interval = 2; +} diff --git a/api/envoy/extensions/tracers/datadog/v4alpha/datadog.proto b/api/envoy/extensions/tracers/datadog/v4alpha/datadog.proto deleted file mode 100644 index f41c8added210..0000000000000 --- a/api/envoy/extensions/tracers/datadog/v4alpha/datadog.proto +++ /dev/null @@ -1,27 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.tracers.datadog.v4alpha; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.tracers.datadog.v4alpha"; -option java_outer_classname = "DatadogProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Datadog tracer] - -// Configuration for the Datadog tracer. -// [#extension: envoy.tracers.datadog] -message DatadogConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.trace.v3.DatadogConfig"; - - // The cluster to use for submitting traces to the Datadog agent. - string collector_cluster = 1 [(validate.rules).string = {min_len: 1}]; - - // The name used for the service when traces are generated by envoy. 
- string service_name = 2 [(validate.rules).string = {min_len: 1}]; -} diff --git a/api/envoy/extensions/tracers/dynamic_ot/v4alpha/dynamic_ot.proto b/api/envoy/extensions/tracers/dynamic_ot/v4alpha/dynamic_ot.proto deleted file mode 100644 index 21455a974d3be..0000000000000 --- a/api/envoy/extensions/tracers/dynamic_ot/v4alpha/dynamic_ot.proto +++ /dev/null @@ -1,33 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.tracers.dynamic_ot.v4alpha; - -import "google/protobuf/struct.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.tracers.dynamic_ot.v4alpha"; -option java_outer_classname = "DynamicOtProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Dynamically loadable OpenTracing tracer] - -// DynamicOtConfig is used to dynamically load a tracer from a shared library -// that implements the `OpenTracing dynamic loading API -// `_. -// [#extension: envoy.tracers.dynamic_ot] -message DynamicOtConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.trace.v3.DynamicOtConfig"; - - // Dynamic library implementing the `OpenTracing API - // `_. - string library = 1 [(validate.rules).string = {min_len: 1}]; - - // The configuration to use when creating a tracer from the given dynamic - // library. - google.protobuf.Struct config = 2; -} diff --git a/api/envoy/extensions/tracers/lightstep/v4alpha/BUILD b/api/envoy/extensions/tracers/lightstep/v4alpha/BUILD deleted file mode 100644 index 1d56979cc4660..0000000000000 --- a/api/envoy/extensions/tracers/lightstep/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/trace/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/tracers/lightstep/v4alpha/lightstep.proto b/api/envoy/extensions/tracers/lightstep/v4alpha/lightstep.proto deleted file mode 100644 index 11d5b2ea84a91..0000000000000 --- a/api/envoy/extensions/tracers/lightstep/v4alpha/lightstep.proto +++ /dev/null @@ -1,52 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.tracers.lightstep.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.tracers.lightstep.v4alpha"; -option java_outer_classname = "LightstepProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: LightStep tracer] - -// Configuration for the LightStep tracer. -// [#extension: envoy.tracers.lightstep] -message LightstepConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.trace.v3.LightstepConfig"; - - // Available propagation modes - enum PropagationMode { - // Propagate trace context in the single header x-ot-span-context. - ENVOY = 0; - - // Propagate trace context using LightStep's native format. - LIGHTSTEP = 1; - - // Propagate trace context using the b3 format. - B3 = 2; - - // Propagation trace context using the w3 trace-context standard. - TRACE_CONTEXT = 3; - } - - reserved 2; - - reserved "access_token_file"; - - // The cluster manager cluster that hosts the LightStep collectors. - string collector_cluster = 1 [(validate.rules).string = {min_len: 1}]; - - // Access token to the `LightStep `_ API. 
- config.core.v4alpha.DataSource access_token = 4; - - // Propagation modes to use by LightStep's tracer. - repeated PropagationMode propagation_modes = 3 - [(validate.rules).repeated = {items {enum {defined_only: true}}}]; -} diff --git a/api/envoy/extensions/tracers/opencensus/v4alpha/BUILD b/api/envoy/extensions/tracers/opencensus/v4alpha/BUILD deleted file mode 100644 index e43ed53877f47..0000000000000 --- a/api/envoy/extensions/tracers/opencensus/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/trace/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto", - ], -) diff --git a/api/envoy/extensions/tracers/opencensus/v4alpha/opencensus.proto b/api/envoy/extensions/tracers/opencensus/v4alpha/opencensus.proto deleted file mode 100644 index 03a5905a1bb9d..0000000000000 --- a/api/envoy/extensions/tracers/opencensus/v4alpha/opencensus.proto +++ /dev/null @@ -1,91 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.tracers.opencensus.v4alpha; - -import "envoy/config/core/v4alpha/grpc_service.proto"; - -import "opencensus/proto/trace/v1/trace_config.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.tracers.opencensus.v4alpha"; -option java_outer_classname = "OpencensusProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: OpenCensus tracer] - -// Configuration for the OpenCensus tracer. 
-// [#next-free-field: 15] -// [#extension: envoy.tracers.opencensus] -message OpenCensusConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.trace.v3.OpenCensusConfig"; - - enum TraceContext { - // No-op default, no trace context is utilized. - NONE = 0; - - // W3C Trace-Context format "traceparent:" header. - TRACE_CONTEXT = 1; - - // Binary "grpc-trace-bin:" header. - GRPC_TRACE_BIN = 2; - - // "X-Cloud-Trace-Context:" header. - CLOUD_TRACE_CONTEXT = 3; - - // X-B3-* headers. - B3 = 4; - } - - reserved 7, 5, 6; - - reserved "zipkin_exporter_enabled", "zipkin_url"; - - // Configures tracing, e.g. the sampler, max number of annotations, etc. - .opencensus.proto.trace.v1.TraceConfig trace_config = 1; - - // Enables the stdout exporter if set to true. This is intended for debugging - // purposes. - bool stdout_exporter_enabled = 2; - - // Enables the Stackdriver exporter if set to true. The project_id must also - // be set. - bool stackdriver_exporter_enabled = 3; - - // The Cloud project_id to use for Stackdriver tracing. - string stackdriver_project_id = 4; - - // (optional) By default, the Stackdriver exporter will connect to production - // Stackdriver. If stackdriver_address is non-empty, it will instead connect - // to this address, which is in the gRPC format: - // https://github.com/grpc/grpc/blob/master/doc/naming.md - string stackdriver_address = 10; - - // (optional) The gRPC server that hosts Stackdriver tracing service. Only - // Google gRPC is supported. If :ref:`target_uri ` - // is not provided, the default production Stackdriver address will be used. - config.core.v4alpha.GrpcService stackdriver_grpc_service = 13; - - // Enables the OpenCensus Agent exporter if set to true. The ocagent_address or - // ocagent_grpc_service must also be set. 
- bool ocagent_exporter_enabled = 11; - - // The address of the OpenCensus Agent, if its exporter is enabled, in gRPC - // format: https://github.com/grpc/grpc/blob/master/doc/naming.md - // [#comment:TODO: deprecate this field] - string ocagent_address = 12; - - // (optional) The gRPC server hosted by the OpenCensus Agent. Only Google gRPC is supported. - // This is only used if the ocagent_address is left empty. - config.core.v4alpha.GrpcService ocagent_grpc_service = 14; - - // List of incoming trace context headers we will accept. First one found - // wins. - repeated TraceContext incoming_trace_context = 8; - - // List of outgoing trace context headers we will produce. - repeated TraceContext outgoing_trace_context = 9; -} diff --git a/api/envoy/extensions/tracers/skywalking/v4alpha/BUILD b/api/envoy/extensions/tracers/skywalking/v4alpha/BUILD deleted file mode 100644 index 1d56979cc4660..0000000000000 --- a/api/envoy/extensions/tracers/skywalking/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/trace/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/tracers/skywalking/v4alpha/skywalking.proto b/api/envoy/extensions/tracers/skywalking/v4alpha/skywalking.proto deleted file mode 100644 index 37936faa61337..0000000000000 --- a/api/envoy/extensions/tracers/skywalking/v4alpha/skywalking.proto +++ /dev/null @@ -1,68 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.tracers.skywalking.v4alpha; - -import "envoy/config/core/v4alpha/grpc_service.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/sensitive.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.tracers.skywalking.v4alpha"; -option java_outer_classname = "SkywalkingProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: SkyWalking tracer] - -// Configuration for the SkyWalking tracer. Please note that if SkyWalking tracer is used as the -// provider of http tracer, then -// :ref:`start_child_span ` -// in the router must be set to true to get the correct topology and tracing data. Moreover, SkyWalking -// Tracer does not support SkyWalking extension header (``sw8-x``) temporarily. -// [#extension: envoy.tracers.skywalking] -message SkyWalkingConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.trace.v3.SkyWalkingConfig"; - - // SkyWalking collector service. - config.core.v4alpha.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; - - ClientConfig client_config = 2; -} - -// Client config for SkyWalking tracer. 
-message ClientConfig { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v3.ClientConfig"; - - // Service name for SkyWalking tracer. If this field is empty, then local service cluster name - // that configured by :ref:`Bootstrap node ` - // message's :ref:`cluster ` field or command line - // option :option:`--service-cluster` will be used. If both this field and local service cluster - // name are empty, ``EnvoyProxy`` is used as the service name by default. - string service_name = 1; - - // Service instance name for SkyWalking tracer. If this field is empty, then local service node - // that configured by :ref:`Bootstrap node ` - // message's :ref:`id ` field or command line option - // :option:`--service-node` will be used. If both this field and local service node are empty, - // ``EnvoyProxy`` is used as the instance name by default. - string instance_name = 2; - - // Authentication token config for SkyWalking. SkyWalking can use token authentication to secure - // that monitoring application data can be trusted. In current version, Token is considered as a - // simple string. - // [#comment:TODO(wbpcode): Get backend token through the SDS API.] - oneof backend_token_specifier { - // Inline authentication token string. - string backend_token = 3 [(udpa.annotations.sensitive) = true]; - } - - // Envoy caches the segment in memory when the SkyWalking backend service is temporarily unavailable. - // This field specifies the maximum number of segments that can be cached. If not specified, the - // default is 1024. - google.protobuf.UInt32Value max_cache_size = 4; -} diff --git a/api/envoy/extensions/tracers/xray/v4alpha/BUILD b/api/envoy/extensions/tracers/xray/v4alpha/BUILD deleted file mode 100644 index 1d56979cc4660..0000000000000 --- a/api/envoy/extensions/tracers/xray/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/trace/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/tracers/xray/v4alpha/xray.proto b/api/envoy/extensions/tracers/xray/v4alpha/xray.proto deleted file mode 100644 index 649f294b4273b..0000000000000 --- a/api/envoy/extensions/tracers/xray/v4alpha/xray.proto +++ /dev/null @@ -1,55 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.tracers.xray.v4alpha; - -import "envoy/config/core/v4alpha/address.proto"; -import "envoy/config/core/v4alpha/base.proto"; - -import "google/protobuf/struct.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.tracers.xray.v4alpha"; -option java_outer_classname = "XrayProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: AWS X-Ray Tracer Configuration] -// Configuration for AWS X-Ray tracer - -// [#extension: envoy.tracers.xray] -message XRayConfig { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v3.XRayConfig"; - - message SegmentFields { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.trace.v3.XRayConfig.SegmentFields"; - - // The type of AWS resource, e.g. "AWS::AppMesh::Proxy". - string origin = 1; - - // AWS resource metadata dictionary. - // See: `X-Ray Segment Document documentation `__ - google.protobuf.Struct aws = 2; - } - - // The UDP endpoint of the X-Ray Daemon where the spans will be sent. - // If this value is not set, the default value of 127.0.0.1:2000 will be used. - config.core.v4alpha.SocketAddress daemon_endpoint = 1; - - // The name of the X-Ray segment. 
- string segment_name = 2 [(validate.rules).string = {min_len: 1}]; - - // The location of a local custom sampling rules JSON file. - // For an example of the sampling rules see: - // `X-Ray SDK documentation - // `_ - config.core.v4alpha.DataSource sampling_rule_manifest = 3; - - // Optional custom fields to be added to each trace segment. - // see: `X-Ray Segment Document documentation - // `__ - SegmentFields segment_fields = 4; -} diff --git a/api/envoy/extensions/tracers/zipkin/v4alpha/BUILD b/api/envoy/extensions/tracers/zipkin/v4alpha/BUILD deleted file mode 100644 index aefd915ae0546..0000000000000 --- a/api/envoy/extensions/tracers/zipkin/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/trace/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/tracers/zipkin/v4alpha/zipkin.proto b/api/envoy/extensions/tracers/zipkin/v4alpha/zipkin.proto deleted file mode 100644 index 93ffefc483907..0000000000000 --- a/api/envoy/extensions/tracers/zipkin/v4alpha/zipkin.proto +++ /dev/null @@ -1,70 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.tracers.zipkin.v4alpha; - -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.tracers.zipkin.v4alpha"; -option java_outer_classname = "ZipkinProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Zipkin tracer] - -// Configuration for the Zipkin tracer. 
-// [#extension: envoy.tracers.zipkin] -// [#next-free-field: 7] -message ZipkinConfig { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v3.ZipkinConfig"; - - // Available Zipkin collector endpoint versions. - enum CollectorEndpointVersion { - // Zipkin API v1, JSON over HTTP. - // [#comment: The default implementation of Zipkin client before this field is added was only v1 - // and the way user configure this was by not explicitly specifying the version. Consequently, - // before this is added, the corresponding Zipkin collector expected to receive v1 payload. - // Hence the motivation of adding HTTP_JSON_V1 as the default is to avoid a breaking change when - // user upgrading Envoy with this change. Furthermore, we also immediately deprecate this field, - // since in Zipkin realm this v1 version is considered to be not preferable anymore.] - DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE = 0 - [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true]; - - // Zipkin API v2, JSON over HTTP. - HTTP_JSON = 1; - - // Zipkin API v2, protobuf over HTTP. - HTTP_PROTO = 2; - - // [#not-implemented-hide:] - GRPC = 3; - } - - // The cluster manager cluster that hosts the Zipkin collectors. - string collector_cluster = 1 [(validate.rules).string = {min_len: 1}]; - - // The API endpoint of the Zipkin service where the spans will be sent. When - // using a standard Zipkin installation, the API endpoint is typically - // /api/v1/spans, which is the default value. - string collector_endpoint = 2 [(validate.rules).string = {min_len: 1}]; - - // Determines whether a 128bit trace id will be used when creating a new - // trace instance. The default value is false, which will result in a 64 bit trace id being used. - bool trace_id_128bit = 3; - - // Determines whether client and server spans will share the same span context. - // The default value is true. 
- google.protobuf.BoolValue shared_span_context = 4; - - // Determines the selected collector endpoint version. By default, the ``HTTP_JSON_V1`` will be - // used. - CollectorEndpointVersion collector_endpoint_version = 5; - - // Optional hostname to use when sending spans to the collector_cluster. Useful for collectors - // that require a specific hostname. Defaults to :ref:`collector_cluster ` above. - string collector_hostname = 6; -} diff --git a/api/envoy/extensions/transport_sockets/quic/v4alpha/BUILD b/api/envoy/extensions/transport_sockets/quic/v4alpha/BUILD deleted file mode 100644 index 976cefd189cca..0000000000000 --- a/api/envoy/extensions/transport_sockets/quic/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/transport_sockets/quic/v3:pkg", - "//envoy/extensions/transport_sockets/tls/v4alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/transport_sockets/quic/v4alpha/quic_transport.proto b/api/envoy/extensions/transport_sockets/quic/v4alpha/quic_transport.proto deleted file mode 100644 index 9a5f096f56c7a..0000000000000 --- a/api/envoy/extensions/transport_sockets/quic/v4alpha/quic_transport.proto +++ /dev/null @@ -1,35 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.quic.v4alpha; - -import "envoy/extensions/transport_sockets/tls/v4alpha/tls.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.quic.v4alpha"; -option java_outer_classname = "QuicTransportProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// 
[#protodoc-title: quic transport] -// [#comment:#extension: envoy.transport_sockets.quic] - -// Configuration for Downstream QUIC transport socket. This provides Google's implementation of Google QUIC and IETF QUIC to Envoy. -message QuicDownstreamTransport { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.quic.v3.QuicDownstreamTransport"; - - tls.v4alpha.DownstreamTlsContext downstream_tls_context = 1 - [(validate.rules).message = {required: true}]; -} - -// Configuration for Upstream QUIC transport socket. This provides Google's implementation of Google QUIC and IETF QUIC to Envoy. -message QuicUpstreamTransport { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.quic.v3.QuicUpstreamTransport"; - - tls.v4alpha.UpstreamTlsContext upstream_tls_context = 1 - [(validate.rules).message = {required: true}]; -} diff --git a/api/envoy/extensions/transport_sockets/starttls/v4alpha/BUILD b/api/envoy/extensions/transport_sockets/starttls/v4alpha/BUILD deleted file mode 100644 index b160d85ddb5b1..0000000000000 --- a/api/envoy/extensions/transport_sockets/starttls/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/transport_sockets/raw_buffer/v3:pkg", - "//envoy/extensions/transport_sockets/starttls/v3:pkg", - "//envoy/extensions/transport_sockets/tls/v4alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/transport_sockets/starttls/v4alpha/starttls.proto b/api/envoy/extensions/transport_sockets/starttls/v4alpha/starttls.proto deleted file mode 100644 index d2a9dbeaf2ed4..0000000000000 --- a/api/envoy/extensions/transport_sockets/starttls/v4alpha/starttls.proto +++ /dev/null @@ -1,58 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.starttls.v4alpha; - -import "envoy/extensions/transport_sockets/raw_buffer/v3/raw_buffer.proto"; -import "envoy/extensions/transport_sockets/tls/v4alpha/tls.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.starttls.v4alpha"; -option java_outer_classname = "StarttlsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: StartTls] -// [#extension: envoy.transport_sockets.starttls] - -// StartTls transport socket addresses situations when a protocol starts in clear-text and -// negotiates an in-band switch to TLS. StartTls transport socket is protocol agnostic. In the -// case of downstream StartTls a network filter is required which understands protocol exchange -// and a state machine to signal to the StartTls transport socket when a switch to TLS is -// required. 
Similarly, upstream StartTls requires the owner of an upstream transport socket to -// manage the state machine necessary to properly coordinate negotiation with the upstream and -// signal to the transport socket when a switch to secure transport is required. - -// Configuration for a downstream StartTls transport socket. -// StartTls transport socket wraps two sockets: -// * raw_buffer socket which is used at the beginning of the session -// * TLS socket used when a protocol negotiates a switch to encrypted traffic. -message StartTlsConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.starttls.v3.StartTlsConfig"; - - // (optional) Configuration for clear-text socket used at the beginning of the session. - raw_buffer.v3.RawBuffer cleartext_socket_config = 1; - - // Configuration for a downstream TLS socket. - transport_sockets.tls.v4alpha.DownstreamTlsContext tls_socket_config = 2 - [(validate.rules).message = {required: true}]; -} - -// Configuration for an upstream StartTls transport socket. -// StartTls transport socket wraps two sockets: -// * raw_buffer socket which is used at the beginning of the session -// * TLS socket used when a protocol negotiates a switch to encrypted traffic. -message UpstreamStartTlsConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.starttls.v3.UpstreamStartTlsConfig"; - - // (optional) Configuration for clear-text socket used at the beginning of the session. - raw_buffer.v3.RawBuffer cleartext_socket_config = 1; - - // Configuration for an upstream TLS socket. 
- transport_sockets.tls.v4alpha.UpstreamTlsContext tls_socket_config = 2 - [(validate.rules).message = {required: true}]; -} diff --git a/api/envoy/extensions/transport_sockets/tap/v4alpha/BUILD b/api/envoy/extensions/transport_sockets/tap/v4alpha/BUILD deleted file mode 100644 index fe393f574d0d9..0000000000000 --- a/api/envoy/extensions/transport_sockets/tap/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/extensions/common/tap/v4alpha:pkg", - "//envoy/extensions/transport_sockets/tap/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/transport_sockets/tap/v4alpha/tap.proto b/api/envoy/extensions/transport_sockets/tap/v4alpha/tap.proto deleted file mode 100644 index 5e0efc403ab5d..0000000000000 --- a/api/envoy/extensions/transport_sockets/tap/v4alpha/tap.proto +++ /dev/null @@ -1,33 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.tap.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/extensions/common/tap/v4alpha/common.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tap.v4alpha"; -option java_outer_classname = "TapProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Tap] -// [#extension: envoy.transport_sockets.tap] - -// Configuration for tap transport socket. This wraps another transport socket, providing the -// ability to interpose and record in plain text any traffic that is surfaced to Envoy. 
-message Tap { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tap.v3.Tap"; - - // Common configuration for the tap transport socket. - common.tap.v4alpha.CommonExtensionConfig common_config = 1 - [(validate.rules).message = {required: true}]; - - // The underlying transport socket being wrapped. - config.core.v4alpha.TransportSocket transport_socket = 2 - [(validate.rules).message = {required: true}]; -} diff --git a/api/envoy/extensions/transport_sockets/tls/v3/BUILD b/api/envoy/extensions/transport_sockets/tls/v3/BUILD index 4485559ad11dd..47b9b9ae57e96 100644 --- a/api/envoy/extensions/transport_sockets/tls/v3/BUILD +++ b/api/envoy/extensions/transport_sockets/tls/v3/BUILD @@ -6,6 +6,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ + "//envoy/annotations:pkg", "//envoy/api/v2/auth:pkg", "//envoy/config/core/v3:pkg", "//envoy/type/matcher/v3:pkg", diff --git a/api/envoy/extensions/transport_sockets/tls/v3/common.proto b/api/envoy/extensions/transport_sockets/tls/v3/common.proto index aa05a31f23d90..82dcb37cd7ca0 100644 --- a/api/envoy/extensions/transport_sockets/tls/v3/common.proto +++ b/api/envoy/extensions/transport_sockets/tls/v3/common.proto @@ -9,6 +9,7 @@ import "envoy/type/matcher/v3/string.proto"; import "google/protobuf/any.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -232,7 +233,27 @@ message TlsSessionTicketKeys { [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true]; } -// [#next-free-field: 13] +// Indicates a certificate to be obtained from a named CertificateProvider plugin instance. +// The plugin instances are defined in the client's bootstrap file. +// The plugin allows certificates to be fetched/refreshed over the network asynchronously with +// respect to the TLS handshake. 
+// [#not-implemented-hide:] +message CertificateProviderPluginInstance { + // Provider instance name. If not present, defaults to "default". + // + // Instance names should generally be defined not in terms of the underlying provider + // implementation (e.g., "file_watcher") but rather in terms of the function of the + // certificates (e.g., "foo_deployment_identity"). + string instance_name = 1; + + // Opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify + // a root-certificate (validation context) or "example.com" to specify a certificate for a + // particular domain. Not all provider instances will actually use this field, so the value + // defaults to the empty string. + string certificate_name = 2; +} + +// [#next-free-field: 14] message CertificateValidationContext { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.CertificateValidationContext"; @@ -279,7 +300,20 @@ message CertificateValidationContext { // directory for any file moves to support rotation. This currently only // applies to dynamic secrets, when the *CertificateValidationContext* is // delivered via SDS. - config.core.v3.DataSource trusted_ca = 1; + // + // Only one of *trusted_ca* and *ca_certificate_provider_instance* may be specified. + // + // [#next-major-version: This field and watched_directory below should ideally be moved into a + // separate sub-message, since there's no point in specifying the latter field without this one.] + config.core.v3.DataSource trusted_ca = 1 + [(udpa.annotations.field_migrate).oneof_promotion = "ca_cert_source"]; + + // Certificate provider instance for fetching TLS certificates. + // + // Only one of *trusted_ca* and *ca_certificate_provider_instance* may be specified. 
+ // [#not-implemented-hide:] + CertificateProviderPluginInstance ca_certificate_provider_instance = 13 + [(udpa.annotations.field_migrate).oneof_promotion = "ca_cert_source"]; // If specified, updates of a file-based *trusted_ca* source will be triggered // by this watch. This allows explicit control over the path watched, by diff --git a/api/envoy/extensions/transport_sockets/tls/v3/tls.proto b/api/envoy/extensions/transport_sockets/tls/v3/tls.proto index 02287de5875fb..f680207955a8c 100644 --- a/api/envoy/extensions/transport_sockets/tls/v3/tls.proto +++ b/api/envoy/extensions/transport_sockets/tls/v3/tls.proto @@ -9,7 +9,7 @@ import "envoy/extensions/transport_sockets/tls/v3/secret.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; -import "udpa/annotations/migrate.proto"; +import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -125,12 +125,18 @@ message DownstreamTlsContext { } // TLS context shared by both client and server TLS contexts. -// [#next-free-field: 14] +// [#next-free-field: 15] message CommonTlsContext { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.CommonTlsContext"; // Config for Certificate provider to get certificates. This provider should allow certificates to be // fetched/refreshed over the network asynchronously with respect to the TLS handshake. + // + // DEPRECATED: This message is not currently used, but if we ever do need it, we will want to + // move it out of CommonTlsContext and into common.proto, similar to the existing + // CertificateProviderPluginInstance message. + // + // [#not-implemented-hide:] message CertificateProvider { // opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify // a root-certificate (validation context) or "TLS" to specify a new tls-certificate. 
@@ -151,6 +157,11 @@ message CommonTlsContext { // Similar to CertificateProvider above, but allows the provider instances to be configured on // the client side instead of being sent from the control plane. + // + // DEPRECATED: This message was moved outside of CommonTlsContext + // and now lives in common.proto. + // + // [#not-implemented-hide:] message CertificateProviderInstance { // Provider instance name. This name must be defined in the client's configuration (e.g., a // bootstrap file) to correspond to a provider instance (i.e., the same data in the typed_config @@ -179,26 +190,20 @@ message CommonTlsContext { // Config for fetching validation context via SDS API. Note SDS API allows certificates to be // fetched/refreshed over the network asynchronously with respect to the TLS handshake. - // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, - // or validation_context_certificate_provider_instance may be used. - SdsSecretConfig validation_context_sds_secret_config = 2 [ - (validate.rules).message = {required: true}, - (udpa.annotations.field_migrate).oneof_promotion = "dynamic_validation_context" - ]; + SdsSecretConfig validation_context_sds_secret_config = 2 + [(validate.rules).message = {required: true}]; - // Certificate provider for fetching validation context. - // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, - // or validation_context_certificate_provider_instance may be used. + // Certificate provider for fetching CA certs. This will populate the + // *default_validation_context.trusted_ca* field. // [#not-implemented-hide:] CertificateProvider validation_context_certificate_provider = 3 - [(udpa.annotations.field_migrate).oneof_promotion = "dynamic_validation_context"]; + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - // Certificate provider instance for fetching validation context. 
- // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, - // or validation_context_certificate_provider_instance may be used. + // Certificate provider instance for fetching CA certs. This will populate the + // *default_validation_context.trusted_ca* field. // [#not-implemented-hide:] CertificateProviderInstance validation_context_certificate_provider_instance = 4 - [(udpa.annotations.field_migrate).oneof_promotion = "dynamic_validation_context"]; + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; } reserved 5; @@ -212,6 +217,12 @@ message CommonTlsContext { // Only a single TLS certificate is supported in client contexts. In server contexts, the first // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is // used for clients that support ECDSA. + // + // Only one of *tls_certificates*, *tls_certificate_sds_secret_configs*, + // and *tls_certificate_provider_instance* may be used. + // [#next-major-version: These mutually exclusive fields should ideally be in a oneof, but it's + // not legal to put a repeated field in a oneof. In the next major version, we should rework + // this to avoid this problem.] repeated TlsCertificate tls_certificates = 2; // Configs for fetching TLS certificates via SDS API. Note SDS API allows certificates to be @@ -220,18 +231,30 @@ message CommonTlsContext { // The same number and types of certificates as :ref:`tls_certificates ` // are valid in the the certificates fetched through this setting. // - // If :ref:`tls_certificates ` - // is non-empty, this field is ignored. + // Only one of *tls_certificates*, *tls_certificate_sds_secret_configs*, + // and *tls_certificate_provider_instance* may be used. + // [#next-major-version: These mutually exclusive fields should ideally be in a oneof, but it's + // not legal to put a repeated field in a oneof. In the next major version, we should rework + // this to avoid this problem.] 
repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 [(validate.rules).repeated = {max_items: 2}]; + // Certificate provider instance for fetching TLS certs. + // + // Only one of *tls_certificates*, *tls_certificate_sds_secret_configs*, + // and *tls_certificate_provider_instance* may be used. + // [#not-implemented-hide:] + CertificateProviderPluginInstance tls_certificate_provider_instance = 14; + // Certificate provider for fetching TLS certificates. // [#not-implemented-hide:] - CertificateProvider tls_certificate_certificate_provider = 9; + CertificateProvider tls_certificate_certificate_provider = 9 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; // Certificate provider instance for fetching TLS certificates. // [#not-implemented-hide:] - CertificateProviderInstance tls_certificate_certificate_provider_instance = 11; + CertificateProviderInstance tls_certificate_certificate_provider_instance = 11 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; oneof validation_context_type { // How to validate peer certificates. @@ -252,11 +275,13 @@ message CommonTlsContext { // Certificate provider for fetching validation context. // [#not-implemented-hide:] - CertificateProvider validation_context_certificate_provider = 10; + CertificateProvider validation_context_certificate_provider = 10 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; // Certificate provider instance for fetching validation context. // [#not-implemented-hide:] - CertificateProviderInstance validation_context_certificate_provider_instance = 12; + CertificateProviderInstance validation_context_certificate_provider_instance = 12 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; } // Supplies the list of ALPN protocols that the listener should expose. 
In diff --git a/api/envoy/extensions/transport_sockets/tls/v4alpha/BUILD b/api/envoy/extensions/transport_sockets/tls/v4alpha/BUILD deleted file mode 100644 index 0cf3219ca2cdc..0000000000000 --- a/api/envoy/extensions/transport_sockets/tls/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/extensions/transport_sockets/tls/v3:pkg", - "//envoy/type/matcher/v4alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/transport_sockets/tls/v4alpha/common.proto b/api/envoy/extensions/transport_sockets/tls/v4alpha/common.proto deleted file mode 100644 index e696fffc5e57d..0000000000000 --- a/api/envoy/extensions/transport_sockets/tls/v4alpha/common.proto +++ /dev/null @@ -1,404 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.tls.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/extension.proto"; -import "envoy/type/matcher/v4alpha/string.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/sensitive.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha"; -option java_outer_classname = "CommonProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Common TLS configuration] - -message TlsParameters { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.TlsParameters"; - - enum TlsProtocol { - // Envoy will choose the optimal TLS 
version. - TLS_AUTO = 0; - - // TLS 1.0 - TLSv1_0 = 1; - - // TLS 1.1 - TLSv1_1 = 2; - - // TLS 1.2 - TLSv1_2 = 3; - - // TLS 1.3 - TLSv1_3 = 4; - } - - // Minimum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for - // servers. - TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; - - // Maximum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_3`` for - // servers. - TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; - - // If specified, the TLS listener will only support the specified `cipher list - // `_ - // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). - // - // If not specified, a default list will be used. Defaults are different for server (downstream) and - // client (upstream) TLS configurations. - // - // In non-FIPS builds, the default server cipher list is: - // - // .. code-block:: none - // - // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] - // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] - // ECDHE-ECDSA-AES128-SHA - // ECDHE-RSA-AES128-SHA - // AES128-GCM-SHA256 - // AES128-SHA - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - // ECDHE-ECDSA-AES256-SHA - // ECDHE-RSA-AES256-SHA - // AES256-GCM-SHA384 - // AES256-SHA - // - // In builds using :ref:`BoringSSL FIPS `, the default server cipher list is: - // - // .. code-block:: none - // - // ECDHE-ECDSA-AES128-GCM-SHA256 - // ECDHE-RSA-AES128-GCM-SHA256 - // ECDHE-ECDSA-AES128-SHA - // ECDHE-RSA-AES128-SHA - // AES128-GCM-SHA256 - // AES128-SHA - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - // ECDHE-ECDSA-AES256-SHA - // ECDHE-RSA-AES256-SHA - // AES256-GCM-SHA384 - // AES256-SHA - // - // In non-FIPS builds, the default client cipher list is: - // - // .. 
code-block:: none - // - // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] - // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - // - // In builds using :ref:`BoringSSL FIPS `, the default client cipher list is: - // - // .. code-block:: none - // - // ECDHE-ECDSA-AES128-GCM-SHA256 - // ECDHE-RSA-AES128-GCM-SHA256 - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - repeated string cipher_suites = 3; - - // If specified, the TLS connection will only support the specified ECDH - // curves. If not specified, the default curves will be used. - // - // In non-FIPS builds, the default curves are: - // - // .. code-block:: none - // - // X25519 - // P-256 - // - // In builds using :ref:`BoringSSL FIPS `, the default curve is: - // - // .. code-block:: none - // - // P-256 - repeated string ecdh_curves = 4; -} - -// BoringSSL private key method configuration. The private key methods are used for external -// (potentially asynchronous) signing and decryption operations. Some use cases for private key -// methods would be TPM support and TLS acceleration. -message PrivateKeyProvider { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.PrivateKeyProvider"; - - reserved 2; - - reserved "config"; - - // Private key method provider name. The name must match a - // supported private key method provider type. - string provider_name = 1 [(validate.rules).string = {min_len: 1}]; - - // Private key method provider specific configuration. - oneof config_type { - google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true]; - } -} - -// [#next-free-field: 8] -message TlsCertificate { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.TlsCertificate"; - - // The TLS certificate chain. 
- // - // If *certificate_chain* is a filesystem path, a watch will be added to the - // parent directory for any file moves to support rotation. This currently - // only applies to dynamic secrets, when the *TlsCertificate* is delivered via - // SDS. - config.core.v4alpha.DataSource certificate_chain = 1; - - // The TLS private key. - // - // If *private_key* is a filesystem path, a watch will be added to the parent - // directory for any file moves to support rotation. This currently only - // applies to dynamic secrets, when the *TlsCertificate* is delivered via SDS. - config.core.v4alpha.DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; - - // If specified, updates of file-based *certificate_chain* and *private_key* - // sources will be triggered by this watch. The certificate/key pair will be - // read together and validated for atomic read consistency (i.e. no - // intervening modification occurred between cert/key read, verified by file - // hash comparisons). This allows explicit control over the path watched, by - // default the parent directories of the filesystem paths in - // *certificate_chain* and *private_key* are watched if this field is not - // specified. This only applies when a *TlsCertificate* is delivered by SDS - // with references to filesystem paths. See the :ref:`SDS key rotation - // ` documentation for further details. - config.core.v4alpha.WatchedDirectory watched_directory = 7; - - // BoringSSL private key method provider. This is an alternative to :ref:`private_key - // ` field. This can't be - // marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key - // ` and - // :ref:`private_key_provider - // ` fields will result in an - // error. - PrivateKeyProvider private_key_provider = 6; - - // The password to decrypt the TLS private key. If this field is not set, it is assumed that the - // TLS private key is not password encrypted. 
- config.core.v4alpha.DataSource password = 3 [(udpa.annotations.sensitive) = true]; - - // The OCSP response to be stapled with this certificate during the handshake. - // The response must be DER-encoded and may only be provided via ``filename`` or - // ``inline_bytes``. The response may pertain to only one certificate. - config.core.v4alpha.DataSource ocsp_staple = 4; - - // [#not-implemented-hide:] - repeated config.core.v4alpha.DataSource signed_certificate_timestamp = 5; -} - -message TlsSessionTicketKeys { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.TlsSessionTicketKeys"; - - // Keys for encrypting and decrypting TLS session tickets. The - // first key in the array contains the key to encrypt all new sessions created by this context. - // All keys are candidates for decrypting received tickets. This allows for easy rotation of keys - // by, for example, putting the new key first, and the previous key second. - // - // If :ref:`session_ticket_keys ` - // is not specified, the TLS library will still support resuming sessions via tickets, but it will - // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts - // or on different hosts. - // - // Each key must contain exactly 80 bytes of cryptographically-secure random data. For - // example, the output of ``openssl rand 80``. - // - // .. attention:: - // - // Using this feature has serious security considerations and risks. Improper handling of keys - // may result in loss of secrecy in connections, even if ciphers supporting perfect forward - // secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some - // discussion. 
To minimize the risk, you must: - // - // * Keep the session ticket keys at least as secure as your TLS certificate private keys - // * Rotate session ticket keys at least daily, and preferably hourly - // * Always generate keys using a cryptographically-secure random data source - repeated config.core.v4alpha.DataSource keys = 1 - [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true]; -} - -// [#next-free-field: 13] -message CertificateValidationContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext"; - - // Peer certificate verification mode. - enum TrustChainVerification { - // Perform default certificate verification (e.g., against CA / verification lists) - VERIFY_TRUST_CHAIN = 0; - - // Connections where the certificate fails verification will be permitted. - // For HTTP connections, the result of certificate verification can be used in route matching. ( - // see :ref:`validated ` ). - ACCEPT_UNTRUSTED = 1; - } - - reserved 4, 5; - - reserved "verify_subject_alt_name"; - - // TLS certificate data containing certificate authority certificates to use in verifying - // a presented peer certificate (e.g. server certificate for clusters or client certificate - // for listeners). If not specified and a peer certificate is presented it will not be - // verified. By default, a client certificate is optional, unless one of the additional - // options (:ref:`require_client_certificate - // `, - // :ref:`verify_certificate_spki - // `, - // :ref:`verify_certificate_hash - // `, or - // :ref:`match_subject_alt_names - // `) is also - // specified. - // - // It can optionally contain certificate revocation lists, in which case Envoy will verify - // that the presented peer certificate has not been revoked by one of the included CRLs. 
Note - // that if a CRL is provided for any certificate authority in a trust chain, a CRL must be - // provided for all certificate authorities in that chain. Failure to do so will result in - // verification failure for both revoked and unrevoked certificates from that chain. - // - // See :ref:`the TLS overview ` for a list of common - // system CA locations. - // - // If *trusted_ca* is a filesystem path, a watch will be added to the parent - // directory for any file moves to support rotation. This currently only - // applies to dynamic secrets, when the *CertificateValidationContext* is - // delivered via SDS. - config.core.v4alpha.DataSource trusted_ca = 1; - - // If specified, updates of a file-based *trusted_ca* source will be triggered - // by this watch. This allows explicit control over the path watched, by - // default the parent directory of the filesystem path in *trusted_ca* is - // watched if this field is not specified. This only applies when a - // *CertificateValidationContext* is delivered by SDS with references to - // filesystem paths. See the :ref:`SDS key rotation ` - // documentation for further details. - config.core.v4alpha.WatchedDirectory watched_directory = 11; - - // An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the - // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate - // matches one of the specified values. - // - // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate - // can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -noout -pubkey - // | openssl pkey -pubin -outform DER - // | openssl dgst -sha256 -binary - // | openssl enc -base64 - // NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= - // - // This is the format used in HTTP Public Key Pinning. 
- // - // When both: - // :ref:`verify_certificate_hash - // ` and - // :ref:`verify_certificate_spki - // ` are specified, - // a hash matching value from either of the lists will result in the certificate being accepted. - // - // .. attention:: - // - // This option is preferred over :ref:`verify_certificate_hash - // `, - // because SPKI is tied to a private key, so it doesn't change when the certificate - // is renewed using the same private key. - repeated string verify_certificate_spki = 3 - [(validate.rules).repeated = {items {string {min_len: 44 max_bytes: 44}}}]; - - // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that - // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. - // - // A hex-encoded SHA-256 of the certificate can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " -f2 - // df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a - // - // A long hex-encoded and colon-separated SHA-256 (a.k.a. "fingerprint") of the certificate - // can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d"=" -f2 - // DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A - // - // Both of those formats are acceptable. - // - // When both: - // :ref:`verify_certificate_hash - // ` and - // :ref:`verify_certificate_spki - // ` are specified, - // a hash matching value from either of the lists will result in the certificate being accepted. - repeated string verify_certificate_hash = 2 - [(validate.rules).repeated = {items {string {min_len: 64 max_bytes: 95}}}]; - - // An optional list of Subject Alternative name matchers. 
If specified, Envoy will verify that the - // Subject Alternative Name of the presented certificate matches one of the specified matchers. - // - // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be - // configured with exact match type in the :ref:`string matcher `. - // For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", - // it should be configured as shown below. - // - // .. code-block:: yaml - // - // match_subject_alt_names: - // exact: "api.example.com" - // - // .. attention:: - // - // Subject Alternative Names are easily spoofable and verifying only them is insecure, - // therefore this option must be used together with :ref:`trusted_ca - // `. - repeated type.matcher.v4alpha.StringMatcher match_subject_alt_names = 9; - - // [#not-implemented-hide:] Must present signed certificate time-stamp. - google.protobuf.BoolValue require_signed_certificate_timestamp = 6; - - // An optional `certificate revocation list - // `_ - // (in PEM format). If specified, Envoy will verify that the presented peer - // certificate has not been revoked by this CRL. If this DataSource contains - // multiple CRLs, all of them will be used. Note that if a CRL is provided - // for any certificate authority in a trust chain, a CRL must be provided - // for all certificate authorities in that chain. Failure to do so will - // result in verification failure for both revoked and unrevoked certificates - // from that chain. - config.core.v4alpha.DataSource crl = 7; - - // If specified, Envoy will not reject expired certificates. - bool allow_expired_certificate = 8; - - // Certificate trust chain verification mode. - TrustChainVerification trust_chain_verification = 10 - [(validate.rules).enum = {defined_only: true}]; - - // The configuration of an extension specific certificate validator. 
- // If specified, all validation is done by the specified validator, - // and the behavior of all other validation settings is defined by the specified validator (and may be entirely ignored, unused, and unvalidated). - // Refer to the documentation for the specified validator. If you do not want a custom validation algorithm, do not set this field. - // [#extension-category: envoy.tls.cert_validator] - config.core.v4alpha.TypedExtensionConfig custom_validator_config = 12; -} diff --git a/api/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto b/api/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto deleted file mode 100644 index 5bb8c86b94385..0000000000000 --- a/api/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto +++ /dev/null @@ -1,58 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.tls.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/config_source.proto"; -import "envoy/extensions/transport_sockets/tls/v4alpha/common.proto"; - -import "udpa/annotations/sensitive.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha"; -option java_outer_classname = "SecretProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Secrets configuration] - -message GenericSecret { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.GenericSecret"; - - // Secret of generic type and is available to filters. 
- config.core.v4alpha.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; -} - -message SdsSecretConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig"; - - // Name by which the secret can be uniquely referred to. When both name and config are specified, - // then secret can be fetched and/or reloaded via SDS. When only name is specified, then secret - // will be loaded from static resources. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - config.core.v4alpha.ConfigSource sds_config = 2; -} - -// [#next-free-field: 6] -message Secret { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.Secret"; - - // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. - string name = 1; - - oneof type { - TlsCertificate tls_certificate = 2; - - TlsSessionTicketKeys session_ticket_keys = 3; - - CertificateValidationContext validation_context = 4; - - GenericSecret generic_secret = 5; - } -} diff --git a/api/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto b/api/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto deleted file mode 100644 index b92cae619dd9c..0000000000000 --- a/api/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto +++ /dev/null @@ -1,282 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.tls.v4alpha; - -import "envoy/config/core/v4alpha/extension.proto"; -import "envoy/extensions/transport_sockets/tls/v4alpha/common.proto"; -import "envoy/extensions/transport_sockets/tls/v4alpha/secret.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha"; -option java_outer_classname = "TlsProto"; 
-option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: TLS transport socket] -// [#extension: envoy.transport_sockets.tls] -// The TLS contexts below provide the transport socket configuration for upstream/downstream TLS. - -message UpstreamTlsContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext"; - - // Common TLS context settings. - // - // .. attention:: - // - // Server certificate verification is not enabled by default. Configure - // :ref:`trusted_ca` to enable - // verification. - CommonTlsContext common_tls_context = 1; - - // SNI string to use when creating TLS backend connections. - string sni = 2 [(validate.rules).string = {max_bytes: 255}]; - - // If true, server-initiated TLS renegotiation will be allowed. - // - // .. attention:: - // - // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. - bool allow_renegotiation = 3; - - // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets - // for TLSv1.2 and older) to store for the purpose of session resumption. - // - // Defaults to 1, setting this to 0 disables session resumption. - google.protobuf.UInt32Value max_session_keys = 4; -} - -// [#next-free-field: 9] -message DownstreamTlsContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext"; - - enum OcspStaplePolicy { - // OCSP responses are optional. If an OCSP response is absent - // or expired, the associated certificate will be used for - // connections without an OCSP staple. - LENIENT_STAPLING = 0; - - // OCSP responses are optional. If an OCSP response is absent, - // the associated certificate will be used without an - // OCSP staple. 
If a response is provided but is expired, - // the associated certificate will not be used for - // subsequent connections. If no suitable certificate is found, - // the connection is rejected. - STRICT_STAPLING = 1; - - // OCSP responses are required. Configuration will fail if - // a certificate is provided without an OCSP response. If a - // response expires, the associated certificate will not be - // used connections. If no suitable certificate is found, the - // connection is rejected. - MUST_STAPLE = 2; - } - - // Common TLS context settings. - CommonTlsContext common_tls_context = 1; - - // If specified, Envoy will reject connections without a valid client - // certificate. - google.protobuf.BoolValue require_client_certificate = 2; - - // If specified, Envoy will reject connections without a valid and matching SNI. - // [#not-implemented-hide:] - google.protobuf.BoolValue require_sni = 3; - - oneof session_ticket_keys_type { - // TLS session ticket key settings. - TlsSessionTicketKeys session_ticket_keys = 4; - - // Config for fetching TLS session ticket keys via SDS API. - SdsSecretConfig session_ticket_keys_sds_secret_config = 5; - - // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS - // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. - // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using - // the keys specified through either :ref:`session_ticket_keys ` - // or :ref:`session_ticket_keys_sds_secret_config `. - // If this config is set to false and no keys are explicitly configured, the TLS server will issue - // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the - // implication that sessions cannot be resumed across hot restarts or on different hosts. 
- bool disable_stateless_session_resumption = 7; - } - - // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session - // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) - // ` - // only seconds could be specified (fractional seconds are going to be ignored). - google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { - lt {seconds: 4294967296} - gte {} - }]; - - // Config for whether to use certificates if they do not have - // an accompanying OCSP response or if the response expires at runtime. - // Defaults to LENIENT_STAPLING - OcspStaplePolicy ocsp_staple_policy = 8 [(validate.rules).enum = {defined_only: true}]; -} - -// TLS context shared by both client and server TLS contexts. -// [#next-free-field: 14] -message CommonTlsContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext"; - - // Config for Certificate provider to get certificates. This provider should allow certificates to be - // fetched/refreshed over the network asynchronously with respect to the TLS handshake. - message CertificateProvider { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider"; - - // opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify - // a root-certificate (validation context) or "TLS" to specify a new tls-certificate. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Provider specific config. - // Note: an implementation is expected to dedup multiple instances of the same config - // to maintain a single certificate-provider instance. The sharing can happen, for - // example, among multiple clusters or between the tls_certificate and validation_context - // certificate providers of a cluster. 
- // This config could be supplied inline or (in future) a named xDS resource. - oneof config { - option (validate.required) = true; - - config.core.v4alpha.TypedExtensionConfig typed_config = 2; - } - } - - // Similar to CertificateProvider above, but allows the provider instances to be configured on - // the client side instead of being sent from the control plane. - message CertificateProviderInstance { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProviderInstance"; - - // Provider instance name. This name must be defined in the client's configuration (e.g., a - // bootstrap file) to correspond to a provider instance (i.e., the same data in the typed_config - // field that would be sent in the CertificateProvider message if the config was sent by the - // control plane). If not present, defaults to "default". - // - // Instance names should generally be defined not in terms of the underlying provider - // implementation (e.g., "file_watcher") but rather in terms of the function of the - // certificates (e.g., "foo_deployment_identity"). - string instance_name = 1; - - // Opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify - // a root-certificate (validation context) or "example.com" to specify a certificate for a - // particular domain. Not all provider instances will actually use this field, so the value - // defaults to the empty string. - string certificate_name = 2; - } - - message CombinedCertificateValidationContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext." - "CombinedCertificateValidationContext"; - - // How to validate peer certificates. 
- CertificateValidationContext default_validation_context = 1 - [(validate.rules).message = {required: true}]; - - oneof dynamic_validation_context { - // Config for fetching validation context via SDS API. Note SDS API allows certificates to be - // fetched/refreshed over the network asynchronously with respect to the TLS handshake. - // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, - // or validation_context_certificate_provider_instance may be used. - SdsSecretConfig validation_context_sds_secret_config = 2 - [(validate.rules).message = {required: true}]; - - // Certificate provider for fetching validation context. - // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, - // or validation_context_certificate_provider_instance may be used. - // [#not-implemented-hide:] - CertificateProvider validation_context_certificate_provider = 3; - - // Certificate provider instance for fetching validation context. - // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, - // or validation_context_certificate_provider_instance may be used. - // [#not-implemented-hide:] - CertificateProviderInstance validation_context_certificate_provider_instance = 4; - } - } - - reserved 5; - - // TLS protocol versions, cipher suites etc. - TlsParameters tls_params = 1; - - // :ref:`Multiple TLS certificates ` can be associated with the - // same context to allow both RSA and ECDSA certificates. - // - // Only a single TLS certificate is supported in client contexts. In server contexts, the first - // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is - // used for clients that support ECDSA. - repeated TlsCertificate tls_certificates = 2; - - // Configs for fetching TLS certificates via SDS API. Note SDS API allows certificates to be - // fetched/refreshed over the network asynchronously with respect to the TLS handshake. 
- // - // The same number and types of certificates as :ref:`tls_certificates ` - // are valid in the the certificates fetched through this setting. - // - // If :ref:`tls_certificates ` - // is non-empty, this field is ignored. - repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 - [(validate.rules).repeated = {max_items: 2}]; - - // Certificate provider for fetching TLS certificates. - // [#not-implemented-hide:] - CertificateProvider tls_certificate_certificate_provider = 9; - - // Certificate provider instance for fetching TLS certificates. - // [#not-implemented-hide:] - CertificateProviderInstance tls_certificate_certificate_provider_instance = 11; - - oneof validation_context_type { - // How to validate peer certificates. - CertificateValidationContext validation_context = 3; - - // Config for fetching validation context via SDS API. Note SDS API allows certificates to be - // fetched/refreshed over the network asynchronously with respect to the TLS handshake. - SdsSecretConfig validation_context_sds_secret_config = 7; - - // Combined certificate validation context holds a default CertificateValidationContext - // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic - // and default CertificateValidationContext are merged into a new CertificateValidationContext - // for validation. This merge is done by Message::MergeFrom(), so dynamic - // CertificateValidationContext overwrites singular fields in default - // CertificateValidationContext, and concatenates repeated fields to default - // CertificateValidationContext, and logical OR is applied to boolean fields. - CombinedCertificateValidationContext combined_validation_context = 8; - - // Certificate provider for fetching validation context. - // [#not-implemented-hide:] - CertificateProvider validation_context_certificate_provider = 10; - - // Certificate provider instance for fetching validation context. 
- // [#not-implemented-hide:] - CertificateProviderInstance validation_context_certificate_provider_instance = 12; - } - - // Supplies the list of ALPN protocols that the listener should expose. In - // practice this is likely to be set to one of two values (see the - // :ref:`codec_type - // ` - // parameter in the HTTP connection manager for more information): - // - // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. - // * "http/1.1" If the listener is only going to support HTTP/1.1. - // - // There is no default for this parameter. If empty, Envoy will not expose ALPN. - repeated string alpn_protocols = 4; - - // Custom TLS handshaker. If empty, defaults to native TLS handshaking - // behavior. - config.core.v4alpha.TypedExtensionConfig custom_handshaker = 13; -} diff --git a/api/envoy/extensions/transport_sockets/tls/v4alpha/tls_spiffe_validator_config.proto b/api/envoy/extensions/transport_sockets/tls/v4alpha/tls_spiffe_validator_config.proto deleted file mode 100644 index 8191318930be6..0000000000000 --- a/api/envoy/extensions/transport_sockets/tls/v4alpha/tls_spiffe_validator_config.proto +++ /dev/null @@ -1,66 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.tls.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha"; -option java_outer_classname = "TlsSpiffeValidatorConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: SPIFFE Certificate Validator] -// [#extension: envoy.tls.cert_validator.spiffe] - -// Configuration specific to the `SPIFFE `_ certificate validator. -// -// Example: -// -// .. 
validated-code-block:: yaml -// :type-name: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext -// -// custom_validator_config: -// name: envoy.tls.cert_validator.spiffe -// typed_config: -// "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.SPIFFECertValidatorConfig -// trust_domains: -// - name: foo.com -// trust_bundle: -// filename: "foo.pem" -// - name: envoy.com -// trust_bundle: -// filename: "envoy.pem" -// -// In this example, a presented peer certificate whose SAN matches `spiffe//foo.com/**` is validated against -// the "foo.pem" x.509 certificate. All the trust bundles are isolated from each other, so no trust domain can mint -// a SVID belonging to another trust domain. That means, in this example, a SVID signed by `envoy.com`'s CA with `spiffe//foo.com/**` -// SAN would be rejected since Envoy selects the trust bundle according to the presented SAN before validate the certificate. -// -// Note that SPIFFE validator inherits and uses the following options from :ref:`CertificateValidationContext `. -// -// - :ref:`allow_expired_certificate ` to allow expired certificates. -// - :ref:`match_subject_alt_names ` to match **URI** SAN of certificates. Unlike the default validator, SPIFFE validator only matches **URI** SAN (which equals to SVID in SPIFFE terminology) and ignore other SAN types. -// -message SPIFFECertValidatorConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.SPIFFECertValidatorConfig"; - - message TrustDomain { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.SPIFFECertValidatorConfig.TrustDomain"; - - // Name of the trust domain, `example.com`, `foo.bar.gov` for example. - // Note that this must *not* have "spiffe://" prefix. 
- string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Specify a data source holding x.509 trust bundle used for validating incoming SVID(s) in this trust domain. - config.core.v4alpha.DataSource trust_bundle = 2; - } - - // This field specifies trust domains used for validating incoming X.509-SVID(s). - repeated TrustDomain trust_domains = 1 [(validate.rules).repeated = {min_items: 1}]; -} diff --git a/api/envoy/extensions/upstreams/http/v3/http_protocol_options.proto b/api/envoy/extensions/upstreams/http/v3/http_protocol_options.proto index f99f4059166b5..271dcfbe49cec 100644 --- a/api/envoy/extensions/upstreams/http/v3/http_protocol_options.proto +++ b/api/envoy/extensions/upstreams/http/v3/http_protocol_options.proto @@ -77,6 +77,9 @@ message HttpProtocolOptions { // If this is used, the cluster can use either of the configured protocols, and // will use whichever protocol was used by the downstream connection. + // + // If HTTP/3 is configured for downstream and not configured for upstream, + // HTTP/3 requests will fail over to HTTP/2. message UseDownstreamHttpConfig { config.core.v3.Http1ProtocolOptions http_protocol_options = 1; diff --git a/api/envoy/extensions/upstreams/http/v4alpha/BUILD b/api/envoy/extensions/upstreams/http/v4alpha/BUILD deleted file mode 100644 index 3b00c0d6e6f2f..0000000000000 --- a/api/envoy/extensions/upstreams/http/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/extensions/upstreams/http/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/extensions/upstreams/http/v4alpha/http_protocol_options.proto b/api/envoy/extensions/upstreams/http/v4alpha/http_protocol_options.proto deleted file mode 100644 index 10971c2587f04..0000000000000 --- a/api/envoy/extensions/upstreams/http/v4alpha/http_protocol_options.proto +++ /dev/null @@ -1,161 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.upstreams.http.v4alpha; - -import "envoy/config/core/v4alpha/protocol.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.v4alpha"; -option java_outer_classname = "HttpProtocolOptionsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: HTTP Protocol Options] -// [#extension: envoy.upstreams.http.http_protocol_options] - -// HttpProtocolOptions specifies Http upstream protocol options. This object -// is used in -// :ref:`typed_extension_protocol_options`, -// keyed by the name `envoy.extensions.upstreams.http.v3.HttpProtocolOptions`. -// -// This controls what protocol(s) should be used for upstream and how said protocol(s) are configured. -// -// This replaces the prior pattern of explicit protocol configuration directly -// in the cluster. So a configuration like this, explicitly configuring the use of HTTP/2 upstream: -// -// .. 
code:: -// -// clusters: -// - name: some_service -// connect_timeout: 5s -// upstream_http_protocol_options: -// auto_sni: true -// common_http_protocol_options: -// idle_timeout: 1s -// http2_protocol_options: -// max_concurrent_streams: 100 -// .... [further cluster config] -// -// Would now look like this: -// -// .. code:: -// -// clusters: -// - name: some_service -// connect_timeout: 5s -// typed_extension_protocol_options: -// envoy.extensions.upstreams.http.v3.HttpProtocolOptions: -// "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions -// upstream_http_protocol_options: -// auto_sni: true -// common_http_protocol_options: -// idle_timeout: 1s -// explicit_http_config: -// http2_protocol_options: -// max_concurrent_streams: 100 -// .... [further cluster config] -// [#next-free-field: 6] -message HttpProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.upstreams.http.v3.HttpProtocolOptions"; - - // If this is used, the cluster will only operate on one of the possible upstream protocols. - // Note that HTTP/2 or above should generally be used for upstream gRPC clusters. - message ExplicitHttpConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.upstreams.http.v3.HttpProtocolOptions.ExplicitHttpConfig"; - - oneof protocol_config { - option (validate.required) = true; - - config.core.v4alpha.Http1ProtocolOptions http_protocol_options = 1; - - config.core.v4alpha.Http2ProtocolOptions http2_protocol_options = 2; - - // .. warning:: - // QUIC support is currently alpha and should be used with caution. Please - // see :ref:`here ` for details. - config.core.v4alpha.Http3ProtocolOptions http3_protocol_options = 3; - } - } - - // If this is used, the cluster can use either of the configured protocols, and - // will use whichever protocol was used by the downstream connection. 
- message UseDownstreamHttpConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.upstreams.http.v3.HttpProtocolOptions.UseDownstreamHttpConfig"; - - config.core.v4alpha.Http1ProtocolOptions http_protocol_options = 1; - - config.core.v4alpha.Http2ProtocolOptions http2_protocol_options = 2; - - // .. warning:: - // QUIC support is currently alpha and should be used with caution. Please - // see :ref:`here ` for details. - config.core.v4alpha.Http3ProtocolOptions http3_protocol_options = 3; - } - - // If this is used, the cluster can use either HTTP/1 or HTTP/2, and will use whichever - // protocol is negotiated by ALPN with the upstream. - // Clusters configured with *AutoHttpConfig* will use the highest available - // protocol; HTTP/2 if supported, otherwise HTTP/1. - // If the upstream does not support ALPN, *AutoHttpConfig* will fail over to HTTP/1. - // This can only be used with transport sockets which support ALPN. Using a - // transport socket which does not support ALPN will result in configuration - // failure. The transport layer may be configured with custom ALPN, but the default ALPN - // for the cluster (or if custom ALPN fails) will be "h2,http/1.1". - message AutoHttpConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.upstreams.http.v3.HttpProtocolOptions.AutoHttpConfig"; - - config.core.v4alpha.Http1ProtocolOptions http_protocol_options = 1; - - config.core.v4alpha.Http2ProtocolOptions http2_protocol_options = 2; - - // Unlike HTTP/1 and HTTP/2, HTTP/3 will not be configured unless it is - // present, and (soon) only if there is an indication of server side - // support. - // See :ref:`here ` for more information on - // when HTTP/3 will be used, and when Envoy will fail over to TCP. - // - // .. warning:: - // QUIC support is currently alpha and should be used with caution. Please - // see :ref:`here ` for details. 
- // AutoHttpConfig config is undergoing especially rapid change and as it - // is alpha is not guaranteed to be API-stable. - config.core.v4alpha.Http3ProtocolOptions http3_protocol_options = 3; - - // [#not-implemented-hide:] - // The presence of alternate protocols cache options causes the use of the - // alternate protocols cache, which is responsible for parsing and caching - // HTTP Alt-Svc headers. This enables the use of HTTP/3 for origins that - // advertise supporting it. - // TODO(RyanTheOptimist): Make this field required when HTTP/3 is enabled. - config.core.v4alpha.AlternateProtocolsCacheOptions alternate_protocols_cache_options = 4; - } - - // This contains options common across HTTP/1 and HTTP/2 - config.core.v4alpha.HttpProtocolOptions common_http_protocol_options = 1; - - // This contains common protocol options which are only applied upstream. - config.core.v4alpha.UpstreamHttpProtocolOptions upstream_http_protocol_options = 2; - - // This controls the actual protocol to be used upstream. - oneof upstream_protocol_options { - option (validate.required) = true; - - // To explicitly configure either HTTP/1 or HTTP/2 (but not both!) use *explicit_http_config*. - // If the *explicit_http_config* is empty, HTTP/1.1 is used. - ExplicitHttpConfig explicit_http_config = 3; - - // This allows switching on protocol based on what protocol the downstream - // connection used. - UseDownstreamHttpConfig use_downstream_protocol_config = 4; - - // This allows switching on protocol based on ALPN - AutoHttpConfig auto_config = 5; - } -} diff --git a/api/envoy/service/accesslog/v4alpha/BUILD b/api/envoy/service/accesslog/v4alpha/BUILD deleted file mode 100644 index 94c70bc66967b..0000000000000 --- a/api/envoy/service/accesslog/v4alpha/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/data/accesslog/v3:pkg", - "//envoy/service/accesslog/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/service/accesslog/v4alpha/als.proto b/api/envoy/service/accesslog/v4alpha/als.proto deleted file mode 100644 index ab0ba0e15213e..0000000000000 --- a/api/envoy/service/accesslog/v4alpha/als.proto +++ /dev/null @@ -1,87 +0,0 @@ -syntax = "proto3"; - -package envoy.service.accesslog.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/data/accesslog/v3/accesslog.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.accesslog.v4alpha"; -option java_outer_classname = "AlsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: gRPC Access Log Service (ALS)] - -// Service for streaming access logs from Envoy to an access log server. -service AccessLogService { - // Envoy will connect and send StreamAccessLogsMessage messages forever. It does not expect any - // response to be sent as nothing would be done in the case of failure. The server should - // disconnect if it expects Envoy to reconnect. In the future we may decide to add a different - // API for "critical" access logs in which Envoy will buffer access logs for some period of time - // until it gets an ACK so it could then retry. This API is designed for high throughput with the - // expectation that it might be lossy. - rpc StreamAccessLogs(stream StreamAccessLogsMessage) returns (StreamAccessLogsResponse) { - } -} - -// Empty response for the StreamAccessLogs API. 
Will never be sent. See below. -message StreamAccessLogsResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.accesslog.v3.StreamAccessLogsResponse"; -} - -// Stream message for the StreamAccessLogs API. Envoy will open a stream to the server and stream -// access logs without ever expecting a response. -message StreamAccessLogsMessage { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.accesslog.v3.StreamAccessLogsMessage"; - - message Identifier { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.accesslog.v3.StreamAccessLogsMessage.Identifier"; - - // The node sending the access log messages over the stream. - config.core.v4alpha.Node node = 1 [(validate.rules).message = {required: true}]; - - // The friendly name of the log configured in :ref:`CommonGrpcAccessLogConfig - // `. - string log_name = 2 [(validate.rules).string = {min_len: 1}]; - } - - // Wrapper for batches of HTTP access log entries. - message HTTPAccessLogEntries { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.accesslog.v3.StreamAccessLogsMessage.HTTPAccessLogEntries"; - - repeated data.accesslog.v3.HTTPAccessLogEntry log_entry = 1 - [(validate.rules).repeated = {min_items: 1}]; - } - - // Wrapper for batches of TCP access log entries. - message TCPAccessLogEntries { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.accesslog.v3.StreamAccessLogsMessage.TCPAccessLogEntries"; - - repeated data.accesslog.v3.TCPAccessLogEntry log_entry = 1 - [(validate.rules).repeated = {min_items: 1}]; - } - - // Identifier data that will only be sent in the first message on the stream. This is effectively - // structured metadata and is a performance optimization. - Identifier identifier = 1; - - // Batches of log entries of a single type. Generally speaking, a given stream should only - // ever include one type of log entry. 
- oneof log_entries { - option (validate.required) = true; - - HTTPAccessLogEntries http_logs = 2; - - TCPAccessLogEntries tcp_logs = 3; - } -} diff --git a/api/envoy/service/auth/v4alpha/BUILD b/api/envoy/service/auth/v4alpha/BUILD deleted file mode 100644 index 0c2b40ee253b1..0000000000000 --- a/api/envoy/service/auth/v4alpha/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/service/auth/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/service/auth/v4alpha/attribute_context.proto b/api/envoy/service/auth/v4alpha/attribute_context.proto deleted file mode 100644 index eed7a2e704ad0..0000000000000 --- a/api/envoy/service/auth/v4alpha/attribute_context.proto +++ /dev/null @@ -1,177 +0,0 @@ -syntax = "proto3"; - -package envoy.service.auth.v4alpha; - -import "envoy/config/core/v4alpha/address.proto"; -import "envoy/config/core/v4alpha/base.proto"; - -import "google/protobuf/timestamp.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.auth.v4alpha"; -option java_outer_classname = "AttributeContextProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Attribute Context ] - -// See :ref:`network filter configuration overview ` -// and :ref:`HTTP filter configuration overview `. - -// An attribute is a piece of metadata that describes an activity on a network. -// For example, the size of an HTTP request, or the status code of an HTTP response. 
-// -// Each attribute has a type and a name, which is logically defined as a proto message field -// of the `AttributeContext`. The `AttributeContext` is a collection of individual attributes -// supported by Envoy authorization system. -// [#comment: The following items are left out of this proto -// Request.Auth field for jwt tokens -// Request.Api for api management -// Origin peer that originated the request -// Caching Protocol -// request_context return values to inject back into the filter chain -// peer.claims -- from X.509 extensions -// Configuration -// - field mask to send -// - which return values from request_context are copied back -// - which return values are copied into request_headers] -// [#next-free-field: 12] -message AttributeContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.auth.v3.AttributeContext"; - - // This message defines attributes for a node that handles a network request. - // The node can be either a service or an application that sends, forwards, - // or receives the request. Service peers should fill in the `service`, - // `principal`, and `labels` as appropriate. - // [#next-free-field: 6] - message Peer { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.auth.v3.AttributeContext.Peer"; - - // The address of the peer, this is typically the IP address. - // It can also be UDS path, or others. - config.core.v4alpha.Address address = 1; - - // The canonical service name of the peer. - // It should be set to :ref:`the HTTP x-envoy-downstream-service-cluster - // ` - // If a more trusted source of the service name is available through mTLS/secure naming, it - // should be used. - string service = 2; - - // The labels associated with the peer. - // These could be pod labels for Kubernetes or tags for VMs. - // The source of the labels could be an X.509 certificate or other configuration. - map labels = 3; - - // The authenticated identity of this peer. 
- // For example, the identity associated with the workload such as a service account. - // If an X.509 certificate is used to assert the identity this field should be sourced from - // `URI Subject Alternative Names`, `DNS Subject Alternate Names` or `Subject` in that order. - // The primary identity should be the principal. The principal format is issuer specific. - // - // Example: - // * SPIFFE format is `spiffe://trust-domain/path` - // * Google account format is `https://accounts.google.com/{userid}` - string principal = 4; - - // The X.509 certificate used to authenticate the identify of this peer. - // When present, the certificate contents are encoded in URL and PEM format. - string certificate = 5; - } - - // Represents a network request, such as an HTTP request. - message Request { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.auth.v3.AttributeContext.Request"; - - // The timestamp when the proxy receives the first byte of the request. - google.protobuf.Timestamp time = 1; - - // Represents an HTTP request or an HTTP-like request. - HttpRequest http = 2; - } - - // This message defines attributes for an HTTP request. - // HTTP/1.x, HTTP/2, gRPC are all considered as HTTP requests. - // [#next-free-field: 13] - message HttpRequest { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.auth.v3.AttributeContext.HttpRequest"; - - // The unique ID for a request, which can be propagated to downstream - // systems. The ID should have low probability of collision - // within a single day for a specific service. - // For HTTP requests, it should be X-Request-ID or equivalent. - string id = 1; - - // The HTTP request method, such as `GET`, `POST`. - string method = 2; - - // The HTTP request headers. If multiple headers share the same key, they - // must be merged according to the HTTP spec. All header keys must be - // lower-cased, because HTTP header keys are case-insensitive. 
- map headers = 3; - - // The request target, as it appears in the first line of the HTTP request. This includes - // the URL path and query-string. No decoding is performed. - string path = 4; - - // The HTTP request `Host` or 'Authority` header value. - string host = 5; - - // The HTTP URL scheme, such as `http` and `https`. - string scheme = 6; - - // This field is always empty, and exists for compatibility reasons. The HTTP URL query is - // included in `path` field. - string query = 7; - - // This field is always empty, and exists for compatibility reasons. The URL fragment is - // not submitted as part of HTTP requests; it is unknowable. - string fragment = 8; - - // The HTTP request size in bytes. If unknown, it must be -1. - int64 size = 9; - - // The network protocol used with the request, such as "HTTP/1.0", "HTTP/1.1", or "HTTP/2". - // - // See :repo:`headers.h:ProtocolStrings ` for a list of all - // possible values. - string protocol = 10; - - // The HTTP request body. - string body = 11; - - // The HTTP request body in bytes. This is used instead of - // :ref:`body ` when - // :ref:`pack_as_bytes ` - // is set to true. - bytes raw_body = 12; - } - - // The source of a network activity, such as starting a TCP connection. - // In a multi hop network activity, the source represents the sender of the - // last hop. - Peer source = 1; - - // The destination of a network activity, such as accepting a TCP connection. - // In a multi hop network activity, the destination represents the receiver of - // the last hop. - Peer destination = 2; - - // Represents a network request, such as an HTTP request. - Request request = 4; - - // This is analogous to http_request.headers, however these contents will not be sent to the - // upstream server. Context_extensions provide an extension mechanism for sending additional - // information to the auth server without modifying the proto definition. It maps to the - // internal opaque context in the filter chain. 
- map context_extensions = 10; - - // Dynamic metadata associated with the request. - config.core.v4alpha.Metadata metadata_context = 11; -} diff --git a/api/envoy/service/auth/v4alpha/external_auth.proto b/api/envoy/service/auth/v4alpha/external_auth.proto deleted file mode 100644 index f368516c302e6..0000000000000 --- a/api/envoy/service/auth/v4alpha/external_auth.proto +++ /dev/null @@ -1,130 +0,0 @@ -syntax = "proto3"; - -package envoy.service.auth.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/service/auth/v4alpha/attribute_context.proto"; -import "envoy/type/v3/http_status.proto"; - -import "google/protobuf/struct.proto"; -import "google/rpc/status.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.auth.v4alpha"; -option java_outer_classname = "ExternalAuthProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Authorization Service ] - -// The authorization service request messages used by external authorization :ref:`network filter -// ` and :ref:`HTTP filter `. - -// A generic interface for performing authorization check on incoming -// requests to a networked service. -service Authorization { - // Performs authorization check based on the attributes associated with the - // incoming request, and returns status `OK` or not `OK`. - rpc Check(CheckRequest) returns (CheckResponse) { - } -} - -message CheckRequest { - option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v3.CheckRequest"; - - // The request attributes. - AttributeContext attributes = 1; -} - -// HTTP attributes for a denied response. 
-message DeniedHttpResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.auth.v3.DeniedHttpResponse"; - - // This field allows the authorization service to send a HTTP response status - // code to the downstream client other than 403 (Forbidden). - type.v3.HttpStatus status = 1 [(validate.rules).message = {required: true}]; - - // This field allows the authorization service to send HTTP response headers - // to the downstream client. Note that the :ref:`append field in HeaderValueOption ` defaults to - // false when used in this message. - repeated config.core.v4alpha.HeaderValueOption headers = 2; - - // This field allows the authorization service to send a response body data - // to the downstream client. - string body = 3; -} - -// HTTP attributes for an OK response. -// [#next-free-field: 7] -message OkHttpResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.auth.v3.OkHttpResponse"; - - reserved 3; - - reserved "dynamic_metadata"; - - // HTTP entity headers in addition to the original request headers. This allows the authorization - // service to append, to add or to override headers from the original request before - // dispatching it to the upstream. Note that the :ref:`append field in HeaderValueOption ` defaults to - // false when used in this message. By setting the `append` field to `true`, - // the filter will append the correspondent header value to the matched request header. - // By leaving `append` as false, the filter will either add a new header, or override an existing - // one if there is a match. - repeated config.core.v4alpha.HeaderValueOption headers = 2; - - // HTTP entity headers to remove from the original request before dispatching - // it to the upstream. This allows the authorization service to act on auth - // related headers (like `Authorization`), process them, and consume them. 
- // Under this model, the upstream will either receive the request (if it's - // authorized) or not receive it (if it's not), but will not see headers - // containing authorization credentials. - // - // Pseudo headers (such as `:authority`, `:method`, `:path` etc), as well as - // the header `Host`, may not be removed as that would make the request - // malformed. If mentioned in `headers_to_remove` these special headers will - // be ignored. - // - // When using the HTTP service this must instead be set by the HTTP - // authorization service as a comma separated list like so: - // ``x-envoy-auth-headers-to-remove: one-auth-header, another-auth-header``. - repeated string headers_to_remove = 5; - - // This field allows the authorization service to send HTTP response headers - // to the downstream client on success. Note that the :ref:`append field in HeaderValueOption ` - // defaults to false when used in this message. - repeated config.core.v4alpha.HeaderValueOption response_headers_to_add = 6; -} - -// Intended for gRPC and Network Authorization servers `only`. -message CheckResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.auth.v3.CheckResponse"; - - // Status `OK` allows the request. Any other status indicates the request should be denied. - google.rpc.Status status = 1; - - // An message that contains HTTP response attributes. This message is - // used when the authorization service needs to send custom responses to the - // downstream client or, to modify/add request headers being dispatched to the upstream. - oneof http_response { - // Supplies http attributes for a denied response. - DeniedHttpResponse denied_response = 2; - - // Supplies http attributes for an ok response. - OkHttpResponse ok_response = 3; - } - - // Optional response metadata that will be emitted as dynamic metadata to be consumed by the next - // filter. 
This metadata lives in a namespace specified by the canonical name of extension filter - // that requires it: - // - // - :ref:`envoy.filters.http.ext_authz ` for HTTP filter. - // - :ref:`envoy.filters.network.ext_authz ` for network filter. - google.protobuf.Struct dynamic_metadata = 4; -} diff --git a/api/envoy/service/discovery/v4alpha/BUILD b/api/envoy/service/discovery/v4alpha/BUILD deleted file mode 100644 index 2de065dc5b393..0000000000000 --- a/api/envoy/service/discovery/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/service/discovery/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/service/discovery/v4alpha/ads.proto b/api/envoy/service/discovery/v4alpha/ads.proto deleted file mode 100644 index 41435811bd17f..0000000000000 --- a/api/envoy/service/discovery/v4alpha/ads.proto +++ /dev/null @@ -1,44 +0,0 @@ -syntax = "proto3"; - -package envoy.service.discovery.v4alpha; - -import "envoy/service/discovery/v4alpha/discovery.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.discovery.v4alpha"; -option java_outer_classname = "AdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Aggregated Discovery Service (ADS)] - -// [#not-implemented-hide:] Discovery services for endpoints, clusters, routes, -// and listeners are retained in the package `envoy.api.v2` for backwards -// compatibility with existing management servers. 
New development in discovery -// services should proceed in the package `envoy.service.discovery.v2`. - -// See https://github.com/lyft/envoy-api#apis for a description of the role of -// ADS and how it is intended to be used by a management server. ADS requests -// have the same structure as their singleton xDS counterparts, but can -// multiplex many resource types on a single stream. The type_url in the -// DiscoveryRequest/DiscoveryResponse provides sufficient information to recover -// the multiplexed singleton APIs at the Envoy instance and management server. -service AggregatedDiscoveryService { - // This is a gRPC-only API. - rpc StreamAggregatedResources(stream DiscoveryRequest) returns (stream DiscoveryResponse) { - } - - rpc DeltaAggregatedResources(stream DeltaDiscoveryRequest) - returns (stream DeltaDiscoveryResponse) { - } -} - -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 -message AdsDummy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v3.AdsDummy"; -} diff --git a/api/envoy/service/discovery/v4alpha/discovery.proto b/api/envoy/service/discovery/v4alpha/discovery.proto deleted file mode 100644 index bf8d48fc7a374..0000000000000 --- a/api/envoy/service/discovery/v4alpha/discovery.proto +++ /dev/null @@ -1,286 +0,0 @@ -syntax = "proto3"; - -package envoy.service.discovery.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/rpc/status.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.discovery.v4alpha"; -option java_outer_classname = "DiscoveryProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: 
Common discovery API components] - -// A DiscoveryRequest requests a set of versioned resources of the same type for -// a given Envoy node on some API. -// [#next-free-field: 7] -message DiscoveryRequest { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v3.DiscoveryRequest"; - - // The version_info provided in the request messages will be the version_info - // received with the most recent successfully processed response or empty on - // the first request. It is expected that no new request is sent after a - // response is received until the Envoy instance is ready to ACK/NACK the new - // configuration. ACK/NACK takes place by returning the new API config version - // as applied or the previous API config version respectively. Each type_url - // (see below) has an independent version associated with it. - string version_info = 1; - - // The node making the request. - config.core.v4alpha.Node node = 2; - - // List of resources to subscribe to, e.g. list of cluster names or a route - // configuration name. If this is empty, all resources for the API are - // returned. LDS/CDS may have empty resource_names, which will cause all - // resources for the Envoy instance to be returned. The LDS and CDS responses - // will then imply a number of resources that need to be fetched via EDS/RDS, - // which will be explicitly enumerated in resource_names. - repeated string resource_names = 3; - - // Type of the resource that is being requested, e.g. - // "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". This is implicit - // in requests made via singleton xDS APIs such as CDS, LDS, etc. but is - // required for ADS. - string type_url = 4; - - // nonce corresponding to DiscoveryResponse being ACK/NACKed. See above - // discussion on version_info and the DiscoveryResponse nonce comment. 
This - // may be empty only if 1) this is a non-persistent-stream xDS such as HTTP, - // or 2) the client has not yet accepted an update in this xDS stream (unlike - // delta, where it is populated only for new explicit ACKs). - string response_nonce = 5; - - // This is populated when the previous :ref:`DiscoveryResponse ` - // failed to update configuration. The *message* field in *error_details* provides the Envoy - // internal exception related to the failure. It is only intended for consumption during manual - // debugging, the string provided is not guaranteed to be stable across Envoy versions. - google.rpc.Status error_detail = 6; -} - -// [#next-free-field: 7] -message DiscoveryResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v3.DiscoveryResponse"; - - // The version of the response data. - string version_info = 1; - - // The response resources. These resources are typed and depend on the API being called. - repeated google.protobuf.Any resources = 2; - - // [#not-implemented-hide:] - // Canary is used to support two Envoy command line flags: - // - // * --terminate-on-canary-transition-failure. When set, Envoy is able to - // terminate if it detects that configuration is stuck at canary. Consider - // this example sequence of updates: - // - Management server applies a canary config successfully. - // - Management server rolls back to a production config. - // - Envoy rejects the new production config. - // Since there is no sensible way to continue receiving configuration - // updates, Envoy will then terminate and apply production config from a - // clean slate. - // * --dry-run-canary. When set, a canary response will never be applied, only - // validated via a dry run. - bool canary = 3; - - // Type URL for resources. Identifies the xDS API when muxing over ADS. - // Must be consistent with the type_url in the 'resources' repeated Any (if non-empty). 
- string type_url = 4; - - // For gRPC based subscriptions, the nonce provides a way to explicitly ack a - // specific DiscoveryResponse in a following DiscoveryRequest. Additional - // messages may have been sent by Envoy to the management server for the - // previous version on the stream prior to this DiscoveryResponse, that were - // unprocessed at response send time. The nonce allows the management server - // to ignore any further DiscoveryRequests for the previous version until a - // DiscoveryRequest bearing the nonce. The nonce is optional and is not - // required for non-stream based xDS implementations. - string nonce = 5; - - // The control plane instance that sent the response. - config.core.v4alpha.ControlPlane control_plane = 6; -} - -// DeltaDiscoveryRequest and DeltaDiscoveryResponse are used in a new gRPC -// endpoint for Delta xDS. -// -// With Delta xDS, the DeltaDiscoveryResponses do not need to include a full -// snapshot of the tracked resources. Instead, DeltaDiscoveryResponses are a -// diff to the state of a xDS client. -// In Delta XDS there are per-resource versions, which allow tracking state at -// the resource granularity. -// An xDS Delta session is always in the context of a gRPC bidirectional -// stream. This allows the xDS server to keep track of the state of xDS clients -// connected to it. -// -// In Delta xDS the nonce field is required and used to pair -// DeltaDiscoveryResponse to a DeltaDiscoveryRequest ACK or NACK. -// Optionally, a response message level system_version_info is present for -// debugging purposes only. -// -// DeltaDiscoveryRequest plays two independent roles. Any DeltaDiscoveryRequest -// can be either or both of: [1] informing the server of what resources the -// client has gained/lost interest in (using resource_names_subscribe and -// resource_names_unsubscribe), or [2] (N)ACKing an earlier resource update from -// the server (using response_nonce, with presence of error_detail making it a NACK). 
-// Additionally, the first message (for a given type_url) of a reconnected gRPC stream -// has a third role: informing the server of the resources (and their versions) -// that the client already possesses, using the initial_resource_versions field. -// -// As with state-of-the-world, when multiple resource types are multiplexed (ADS), -// all requests/acknowledgments/updates are logically walled off by type_url: -// a Cluster ACK exists in a completely separate world from a prior Route NACK. -// In particular, initial_resource_versions being sent at the "start" of every -// gRPC stream actually entails a message for each type_url, each with its own -// initial_resource_versions. -// [#next-free-field: 8] -message DeltaDiscoveryRequest { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v3.DeltaDiscoveryRequest"; - - // The node making the request. - config.core.v4alpha.Node node = 1; - - // Type of the resource that is being requested, e.g. - // "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". This does not need to be set if - // resources are only referenced via *xds_resource_subscribe* and - // *xds_resources_unsubscribe*. - string type_url = 2; - - // DeltaDiscoveryRequests allow the client to add or remove individual - // resources to the set of tracked resources in the context of a stream. - // All resource names in the resource_names_subscribe list are added to the - // set of tracked resources and all resource names in the resource_names_unsubscribe - // list are removed from the set of tracked resources. - // - // *Unlike* state-of-the-world xDS, an empty resource_names_subscribe or - // resource_names_unsubscribe list simply means that no resources are to be - // added or removed to the resource list. - // *Like* state-of-the-world xDS, the server must send updates for all tracked - // resources, but can also send updates for resources the client has not subscribed to. 
- // - // NOTE: the server must respond with all resources listed in resource_names_subscribe, - // even if it believes the client has the most recent version of them. The reason: - // the client may have dropped them, but then regained interest before it had a chance - // to send the unsubscribe message. See DeltaSubscriptionStateTest.RemoveThenAdd. - // - // These two fields can be set in any DeltaDiscoveryRequest, including ACKs - // and initial_resource_versions. - // - // A list of Resource names to add to the list of tracked resources. - repeated string resource_names_subscribe = 3; - - // A list of Resource names to remove from the list of tracked resources. - repeated string resource_names_unsubscribe = 4; - - // Informs the server of the versions of the resources the xDS client knows of, to enable the - // client to continue the same logical xDS session even in the face of gRPC stream reconnection. - // It will not be populated: [1] in the very first stream of a session, since the client will - // not yet have any resources, [2] in any message after the first in a stream (for a given - // type_url), since the server will already be correctly tracking the client's state. - // (In ADS, the first message *of each type_url* of a reconnected stream populates this map.) - // The map's keys are names of xDS resources known to the xDS client. - // The map's values are opaque resource versions. - map initial_resource_versions = 5; - - // When the DeltaDiscoveryRequest is a ACK or NACK message in response - // to a previous DeltaDiscoveryResponse, the response_nonce must be the - // nonce in the DeltaDiscoveryResponse. - // Otherwise (unlike in DiscoveryRequest) response_nonce must be omitted. - string response_nonce = 6; - - // This is populated when the previous :ref:`DiscoveryResponse ` - // failed to update configuration. The *message* field in *error_details* - // provides the Envoy internal exception related to the failure. 
- google.rpc.Status error_detail = 7; -} - -// [#next-free-field: 8] -message DeltaDiscoveryResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v3.DeltaDiscoveryResponse"; - - // The version of the response data (used for debugging). - string system_version_info = 1; - - // The response resources. These are typed resources, whose types must match - // the type_url field. - repeated Resource resources = 2; - - // field id 3 IS available! - - // Type URL for resources. Identifies the xDS API when muxing over ADS. - // Must be consistent with the type_url in the Any within 'resources' if 'resources' is non-empty. - string type_url = 4; - - // Resources names of resources that have be deleted and to be removed from the xDS Client. - // Removed resources for missing resources can be ignored. - repeated string removed_resources = 6; - - // The nonce provides a way for DeltaDiscoveryRequests to uniquely - // reference a DeltaDiscoveryResponse when (N)ACKing. The nonce is required. - string nonce = 5; - - // [#not-implemented-hide:] - // The control plane instance that sent the response. - config.core.v4alpha.ControlPlane control_plane = 7; -} - -// [#next-free-field: 8] -message Resource { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v3.Resource"; - - // Cache control properties for the resource. - // [#not-implemented-hide:] - message CacheControl { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v3.Resource.CacheControl"; - - // If true, xDS proxies may not cache this resource. - // Note that this does not apply to clients other than xDS proxies, which must cache resources - // for their own use, regardless of the value of this field. - bool do_not_cache = 1; - } - - // The resource's name, to distinguish it from others of the same type of resource. 
- string name = 3; - - // The aliases are a list of other names that this resource can go by. - repeated string aliases = 4; - - // The resource level version. It allows xDS to track the state of individual - // resources. - string version = 1; - - // The resource being tracked. - google.protobuf.Any resource = 2; - - // Time-to-live value for the resource. For each resource, a timer is started. The timer is - // reset each time the resource is received with a new TTL. If the resource is received with - // no TTL set, the timer is removed for the resource. Upon expiration of the timer, the - // configuration for the resource will be removed. - // - // The TTL can be refreshed or changed by sending a response that doesn't change the resource - // version. In this case the resource field does not need to be populated, which allows for - // light-weight "heartbeat" updates to keep a resource with a TTL alive. - // - // The TTL feature is meant to support configurations that should be removed in the event of - // a management server failure. For example, the feature may be used for fault injection - // testing where the fault injection should be terminated in the event that Envoy loses contact - // with the management server. - google.protobuf.Duration ttl = 6; - - // Cache control properties for the resource. 
- // [#not-implemented-hide:] - CacheControl cache_control = 7; -} diff --git a/api/envoy/service/endpoint/v3/leds.proto b/api/envoy/service/endpoint/v3/leds.proto new file mode 100644 index 0000000000000..89172f487eba0 --- /dev/null +++ b/api/envoy/service/endpoint/v3/leds.proto @@ -0,0 +1,37 @@ +syntax = "proto3"; + +package envoy.service.endpoint.v3; + +import "envoy/service/discovery/v3/discovery.proto"; + +import "envoy/annotations/resource.proto"; +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.service.endpoint.v3"; +option java_outer_classname = "LedsProto"; +option java_multiple_files = true; +option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#not-implemented-hide:] +// [#protodoc-title: LEDS] +// Locality-Endpoint discovery +// [#comment:TODO(adisuissa): Link to unified matching docs: +// :ref:`architecture overview`] + +service LocalityEndpointDiscoveryService { + option (envoy.annotations.resource).type = "envoy.config.endpoint.v3.LbEndpoint"; + + // State-of-the-World (DiscoveryRequest) and REST are not supported. + + // The resource_names_subscribe resource_names_unsubscribe fields in DeltaDiscoveryRequest + // specify a list of glob collections to subscribe to updates for. + rpc DeltaLocalityEndpoints(stream discovery.v3.DeltaDiscoveryRequest) + returns (stream discovery.v3.DeltaDiscoveryResponse) { + } +} + +// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing +// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. +message LedsDummy { +} diff --git a/api/envoy/service/event_reporting/v4alpha/BUILD b/api/envoy/service/event_reporting/v4alpha/BUILD deleted file mode 100644 index 7f342132a86d9..0000000000000 --- a/api/envoy/service/event_reporting/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/service/event_reporting/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/service/event_reporting/v4alpha/event_reporting_service.proto b/api/envoy/service/event_reporting/v4alpha/event_reporting_service.proto deleted file mode 100644 index 6bff2a09c25ba..0000000000000 --- a/api/envoy/service/event_reporting/v4alpha/event_reporting_service.proto +++ /dev/null @@ -1,69 +0,0 @@ -syntax = "proto3"; - -package envoy.service.event_reporting.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.event_reporting.v4alpha"; -option java_outer_classname = "EventReportingServiceProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: gRPC Event Reporting Service] - -// [#not-implemented-hide:] -// Service for streaming different types of events from Envoy to a server. The examples of -// such events may be health check or outlier detection events. -service EventReportingService { - // Envoy will connect and send StreamEventsRequest messages forever. - // The management server may send StreamEventsResponse to configure event stream. See below. - // This API is designed for high throughput with the expectation that it might be lossy. - rpc StreamEvents(stream StreamEventsRequest) returns (stream StreamEventsResponse) { - } -} - -// [#not-implemented-hide:] -// An events envoy sends to the management server. 
-message StreamEventsRequest { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.event_reporting.v3.StreamEventsRequest"; - - message Identifier { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.event_reporting.v3.StreamEventsRequest.Identifier"; - - // The node sending the event messages over the stream. - config.core.v4alpha.Node node = 1 [(validate.rules).message = {required: true}]; - } - - // Identifier data that will only be sent in the first message on the stream. This is effectively - // structured metadata and is a performance optimization. - Identifier identifier = 1; - - // Batch of events. When the stream is already active, it will be the events occurred - // since the last message had been sent. If the server receives unknown event type, it should - // silently ignore it. - // - // The following events are supported: - // - // * :ref:`HealthCheckEvent ` - // * :ref:`OutlierDetectionEvent ` - repeated google.protobuf.Any events = 2 [(validate.rules).repeated = {min_items: 1}]; -} - -// [#not-implemented-hide:] -// The management server may send envoy a StreamEventsResponse to tell which events the server -// is interested in. In future, with aggregated event reporting service, this message will -// contain, for example, clusters the envoy should send events for, or event types the server -// wants to process. -message StreamEventsResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.event_reporting.v3.StreamEventsResponse"; -} diff --git a/api/envoy/service/health/v3/hds.proto b/api/envoy/service/health/v3/hds.proto index bb8781d5c3958..51266a64fa959 100644 --- a/api/envoy/service/health/v3/hds.proto +++ b/api/envoy/service/health/v3/hds.proto @@ -186,3 +186,8 @@ message HealthCheckSpecifier { // The default is 1 second. google.protobuf.Duration interval = 2; } + +// [#not-implemented-hide:] Not configuration. 
Workaround c++ protobuf issue with importing +// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. +message HdsDummy { +} diff --git a/api/envoy/service/health/v4alpha/BUILD b/api/envoy/service/health/v4alpha/BUILD deleted file mode 100644 index 60bd19511855e..0000000000000 --- a/api/envoy/service/health/v4alpha/BUILD +++ /dev/null @@ -1,16 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/config/cluster/v4alpha:pkg", - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/endpoint/v3:pkg", - "//envoy/service/health/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/service/health/v4alpha/hds.proto b/api/envoy/service/health/v4alpha/hds.proto deleted file mode 100644 index 9ce239f5e9cf9..0000000000000 --- a/api/envoy/service/health/v4alpha/hds.proto +++ /dev/null @@ -1,192 +0,0 @@ -syntax = "proto3"; - -package envoy.service.health.v4alpha; - -import "envoy/config/cluster/v4alpha/cluster.proto"; -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/health_check.proto"; -import "envoy/config/endpoint/v3/endpoint_components.proto"; - -import "google/api/annotations.proto"; -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.health.v4alpha"; -option java_outer_classname = "HdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Health Discovery Service (HDS)] - -// HDS is Health Discovery Service. 
It compliments Envoy’s health checking -// service by designating this Envoy to be a healthchecker for a subset of hosts -// in the cluster. The status of these health checks will be reported to the -// management server, where it can be aggregated etc and redistributed back to -// Envoy through EDS. -service HealthDiscoveryService { - // 1. Envoy starts up and if its can_healthcheck option in the static - // bootstrap config is enabled, sends HealthCheckRequest to the management - // server. It supplies its capabilities (which protocol it can health check - // with, what zone it resides in, etc.). - // 2. In response to (1), the management server designates this Envoy as a - // healthchecker to health check a subset of all upstream hosts for a given - // cluster (for example upstream Host 1 and Host 2). It streams - // HealthCheckSpecifier messages with cluster related configuration for all - // clusters this Envoy is designated to health check. Subsequent - // HealthCheckSpecifier message will be sent on changes to: - // a. Endpoints to health checks - // b. Per cluster configuration change - // 3. Envoy creates a health probe based on the HealthCheck config and sends - // it to endpoint(ip:port) of Host 1 and 2. Based on the HealthCheck - // configuration Envoy waits upon the arrival of the probe response and - // looks at the content of the response to decide whether the endpoint is - // healthy or not. If a response hasn't been received within the timeout - // interval, the endpoint health status is considered TIMEOUT. - // 4. Envoy reports results back in an EndpointHealthResponse message. - // Envoy streams responses as often as the interval configured by the - // management server in HealthCheckSpecifier. - // 5. The management Server collects health statuses for all endpoints in the - // cluster (for all clusters) and uses this information to construct - // EndpointDiscoveryResponse messages. - // 6. 
Once Envoy has a list of upstream endpoints to send traffic to, it load - // balances traffic to them without additional health checking. It may - // use inline healthcheck (i.e. consider endpoint UNHEALTHY if connection - // failed to a particular endpoint to account for health status propagation - // delay between HDS and EDS). - // By default, can_healthcheck is true. If can_healthcheck is false, Cluster - // configuration may not contain HealthCheck message. - // TODO(htuch): How is can_healthcheck communicated to CDS to ensure the above - // invariant? - // TODO(htuch): Add @amb67's diagram. - rpc StreamHealthCheck(stream HealthCheckRequestOrEndpointHealthResponse) - returns (stream HealthCheckSpecifier) { - } - - // TODO(htuch): Unlike the gRPC version, there is no stream-based binding of - // request/response. Should we add an identifier to the HealthCheckSpecifier - // to bind with the response? - rpc FetchHealthCheck(HealthCheckRequestOrEndpointHealthResponse) returns (HealthCheckSpecifier) { - option (google.api.http).post = "/v3/discovery:health_check"; - option (google.api.http).body = "*"; - } -} - -// Defines supported protocols etc, so the management server can assign proper -// endpoints to healthcheck. -message Capability { - option (udpa.annotations.versioning).previous_message_type = "envoy.service.health.v3.Capability"; - - // Different Envoy instances may have different capabilities (e.g. Redis) - // and/or have ports enabled for different protocols. 
- enum Protocol { - HTTP = 0; - TCP = 1; - REDIS = 2; - } - - repeated Protocol health_check_protocols = 1; -} - -message HealthCheckRequest { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.health.v3.HealthCheckRequest"; - - config.core.v4alpha.Node node = 1; - - Capability capability = 2; -} - -message EndpointHealth { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.health.v3.EndpointHealth"; - - config.endpoint.v3.Endpoint endpoint = 1; - - config.core.v4alpha.HealthStatus health_status = 2; -} - -// Group endpoint health by locality under each cluster. -message LocalityEndpointsHealth { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.health.v3.LocalityEndpointsHealth"; - - config.core.v4alpha.Locality locality = 1; - - repeated EndpointHealth endpoints_health = 2; -} - -// The health status of endpoints in a cluster. The cluster name and locality -// should match the corresponding fields in ClusterHealthCheck message. -message ClusterEndpointsHealth { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.health.v3.ClusterEndpointsHealth"; - - string cluster_name = 1; - - repeated LocalityEndpointsHealth locality_endpoints_health = 2; -} - -message EndpointHealthResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.health.v3.EndpointHealthResponse"; - - reserved 1; - - reserved "endpoints_health"; - - // Organize Endpoint health information by cluster. 
- repeated ClusterEndpointsHealth cluster_endpoints_health = 2; -} - -message HealthCheckRequestOrEndpointHealthResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.health.v3.HealthCheckRequestOrEndpointHealthResponse"; - - oneof request_type { - HealthCheckRequest health_check_request = 1; - - EndpointHealthResponse endpoint_health_response = 2; - } -} - -message LocalityEndpoints { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.health.v3.LocalityEndpoints"; - - config.core.v4alpha.Locality locality = 1; - - repeated config.endpoint.v3.Endpoint endpoints = 2; -} - -// The cluster name and locality is provided to Envoy for the endpoints that it -// health checks to support statistics reporting, logging and debugging by the -// Envoy instance (outside of HDS). For maximum usefulness, it should match the -// same cluster structure as that provided by EDS. -message ClusterHealthCheck { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.health.v3.ClusterHealthCheck"; - - string cluster_name = 1; - - repeated config.core.v4alpha.HealthCheck health_checks = 2; - - repeated LocalityEndpoints locality_endpoints = 3; - - // Optional map that gets filtered by :ref:`health_checks.transport_socket_match_criteria ` - // on connection when health checking. For more details, see - // :ref:`config.cluster.v3.Cluster.transport_socket_matches `. - repeated config.cluster.v4alpha.Cluster.TransportSocketMatch transport_socket_matches = 4; -} - -message HealthCheckSpecifier { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.health.v3.HealthCheckSpecifier"; - - repeated ClusterHealthCheck cluster_health_checks = 1; - - // The default is 1 second. 
- google.protobuf.Duration interval = 2; -} diff --git a/api/envoy/service/load_stats/v4alpha/BUILD b/api/envoy/service/load_stats/v4alpha/BUILD deleted file mode 100644 index 91d914645041b..0000000000000 --- a/api/envoy/service/load_stats/v4alpha/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/endpoint/v3:pkg", - "//envoy/service/load_stats/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/service/load_stats/v4alpha/lrs.proto b/api/envoy/service/load_stats/v4alpha/lrs.proto deleted file mode 100644 index f99b6555f4a17..0000000000000 --- a/api/envoy/service/load_stats/v4alpha/lrs.proto +++ /dev/null @@ -1,102 +0,0 @@ -syntax = "proto3"; - -package envoy.service.load_stats.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/endpoint/v3/load_report.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.load_stats.v4alpha"; -option java_outer_classname = "LrsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Load Reporting service (LRS)] - -// Load Reporting Service is an Envoy API to emit load reports. Envoy will initiate a bi-directional -// stream with a management server. Upon connecting, the management server can send a -// :ref:`LoadStatsResponse ` to a node it is -// interested in getting the load reports for. Envoy in this node will start sending -// :ref:`LoadStatsRequest `. 
This is done periodically -// based on the :ref:`load reporting interval ` -// For details, take a look at the :ref:`Load Reporting Service sandbox example `. - -service LoadReportingService { - // Advanced API to allow for multi-dimensional load balancing by remote - // server. For receiving LB assignments, the steps are: - // 1, The management server is configured with per cluster/zone/load metric - // capacity configuration. The capacity configuration definition is - // outside of the scope of this document. - // 2. Envoy issues a standard {Stream,Fetch}Endpoints request for the clusters - // to balance. - // - // Independently, Envoy will initiate a StreamLoadStats bidi stream with a - // management server: - // 1. Once a connection establishes, the management server publishes a - // LoadStatsResponse for all clusters it is interested in learning load - // stats about. - // 2. For each cluster, Envoy load balances incoming traffic to upstream hosts - // based on per-zone weights and/or per-instance weights (if specified) - // based on intra-zone LbPolicy. This information comes from the above - // {Stream,Fetch}Endpoints. - // 3. When upstream hosts reply, they optionally add header with ASCII representation of EndpointLoadMetricStats. - // 4. Envoy aggregates load reports over the period of time given to it in - // LoadStatsResponse.load_reporting_interval. This includes aggregation - // stats Envoy maintains by itself (total_requests, rpc_errors etc.) as - // well as load metrics from upstream hosts. - // 5. When the timer of load_reporting_interval expires, Envoy sends new - // LoadStatsRequest filled with load reports for each cluster. - // 6. The management server uses the load reports from all reported Envoys - // from around the world, computes global assignment and prepares traffic - // assignment destined for each zone Envoys are located in. Goto 2. 
- rpc StreamLoadStats(stream LoadStatsRequest) returns (stream LoadStatsResponse) { - } -} - -// A load report Envoy sends to the management server. -message LoadStatsRequest { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.load_stats.v3.LoadStatsRequest"; - - // Node identifier for Envoy instance. - config.core.v4alpha.Node node = 1; - - // A list of load stats to report. - repeated config.endpoint.v3.ClusterStats cluster_stats = 2; -} - -// The management server sends envoy a LoadStatsResponse with all clusters it -// is interested in learning load stats about. -message LoadStatsResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.load_stats.v3.LoadStatsResponse"; - - // Clusters to report stats for. - // Not populated if *send_all_clusters* is true. - repeated string clusters = 1; - - // If true, the client should send all clusters it knows about. - // Only clients that advertise the "envoy.lrs.supports_send_all_clusters" capability in their - // :ref:`client_features` field will honor this field. - bool send_all_clusters = 4; - - // The minimum interval of time to collect stats over. This is only a minimum for two reasons: - // - // 1. There may be some delay from when the timer fires until stats sampling occurs. - // 2. For clusters that were already feature in the previous *LoadStatsResponse*, any traffic - // that is observed in between the corresponding previous *LoadStatsRequest* and this - // *LoadStatsResponse* will also be accumulated and billed to the cluster. This avoids a period - // of inobservability that might otherwise exists between the messages. New clusters are not - // subject to this consideration. - google.protobuf.Duration load_reporting_interval = 2; - - // Set to *true* if the management server supports endpoint granularity - // report. 
- bool report_endpoint_granularity = 3; -} diff --git a/api/envoy/service/metrics/v4alpha/BUILD b/api/envoy/service/metrics/v4alpha/BUILD deleted file mode 100644 index 285d31cf31d46..0000000000000 --- a/api/envoy/service/metrics/v4alpha/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/service/metrics/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@prometheus_metrics_model//:client_model", - ], -) diff --git a/api/envoy/service/metrics/v4alpha/metrics_service.proto b/api/envoy/service/metrics/v4alpha/metrics_service.proto deleted file mode 100644 index 5e1412f103e93..0000000000000 --- a/api/envoy/service/metrics/v4alpha/metrics_service.proto +++ /dev/null @@ -1,53 +0,0 @@ -syntax = "proto3"; - -package envoy.service.metrics.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; - -import "io/prometheus/client/metrics.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.metrics.v4alpha"; -option java_outer_classname = "MetricsServiceProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Metrics service] - -// Service for streaming metrics to server that consumes the metrics data. It uses Prometheus metric -// data model as a standard to represent metrics information. -service MetricsService { - // Envoy will connect and send StreamMetricsMessage messages forever. It does not expect any - // response to be sent as nothing would be done in the case of failure. 
- rpc StreamMetrics(stream StreamMetricsMessage) returns (StreamMetricsResponse) { - } -} - -message StreamMetricsResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.metrics.v3.StreamMetricsResponse"; -} - -message StreamMetricsMessage { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.metrics.v3.StreamMetricsMessage"; - - message Identifier { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.metrics.v3.StreamMetricsMessage.Identifier"; - - // The node sending metrics over the stream. - config.core.v4alpha.Node node = 1 [(validate.rules).message = {required: true}]; - } - - // Identifier data effectively is a structured metadata. As a performance optimization this will - // only be sent in the first message on the stream. - Identifier identifier = 1; - - // A list of metric entries - repeated io.prometheus.client.MetricFamily envoy_metrics = 2; -} diff --git a/api/envoy/service/status/v4alpha/BUILD b/api/envoy/service/status/v4alpha/BUILD deleted file mode 100644 index ddcf51e3b2652..0000000000000 --- a/api/envoy/service/status/v4alpha/BUILD +++ /dev/null @@ -1,16 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/admin/v4alpha:pkg", - "//envoy/config/core/v4alpha:pkg", - "//envoy/service/status/v3:pkg", - "//envoy/type/matcher/v4alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/service/status/v4alpha/csds.proto b/api/envoy/service/status/v4alpha/csds.proto deleted file mode 100644 index 9680c6feacf79..0000000000000 --- a/api/envoy/service/status/v4alpha/csds.proto +++ /dev/null @@ -1,185 +0,0 @@ -syntax = "proto3"; - -package envoy.service.status.v4alpha; - -import "envoy/admin/v4alpha/config_dump.proto"; -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/type/matcher/v4alpha/node.proto"; - -import "google/api/annotations.proto"; -import "google/protobuf/any.proto"; -import "google/protobuf/timestamp.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.status.v4alpha"; -option java_outer_classname = "CsdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Client Status Discovery Service (CSDS)] - -// CSDS is Client Status Discovery Service. It can be used to get the status of -// an xDS-compliant client from the management server's point of view. It can -// also be used to get the current xDS states directly from the client. -service ClientStatusDiscoveryService { - rpc StreamClientStatus(stream ClientStatusRequest) returns (stream ClientStatusResponse) { - } - - rpc FetchClientStatus(ClientStatusRequest) returns (ClientStatusResponse) { - option (google.api.http).post = "/v3/discovery:client_status"; - option (google.api.http).body = "*"; - } -} - -// Status of a config from a management server view. 
-enum ConfigStatus { - // Status info is not available/unknown. - UNKNOWN = 0; - - // Management server has sent the config to client and received ACK. - SYNCED = 1; - - // Config is not sent. - NOT_SENT = 2; - - // Management server has sent the config to client but hasn’t received - // ACK/NACK. - STALE = 3; - - // Management server has sent the config to client but received NACK. The - // attached config dump will be the latest config (the rejected one), since - // it is the persisted version in the management server. - ERROR = 4; -} - -// Config status from a client-side view. -enum ClientConfigStatus { - // Config status is not available/unknown. - CLIENT_UNKNOWN = 0; - - // Client requested the config but hasn't received any config from management - // server yet. - CLIENT_REQUESTED = 1; - - // Client received the config and replied with ACK. - CLIENT_ACKED = 2; - - // Client received the config and replied with NACK. Notably, the attached - // config dump is not the NACKed version, but the most recent accepted one. If - // no config is accepted yet, the attached config dump will be empty. - CLIENT_NACKED = 3; -} - -// Request for client status of clients identified by a list of NodeMatchers. -message ClientStatusRequest { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.status.v3.ClientStatusRequest"; - - // Management server can use these match criteria to identify clients. - // The match follows OR semantics. - repeated type.matcher.v4alpha.NodeMatcher node_matchers = 1; - - // The node making the csds request. - config.core.v4alpha.Node node = 2; -} - -// Detailed config (per xDS) with status. -// [#next-free-field: 8] -message PerXdsConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.status.v3.PerXdsConfig"; - - reserved 7; - - reserved "client_status"; - - // Config status generated by management servers. Will not be present if the - // CSDS server is an xDS client. 
- ConfigStatus status = 1; - - oneof per_xds_config { - admin.v4alpha.ListenersConfigDump listener_config = 2; - - admin.v4alpha.ClustersConfigDump cluster_config = 3; - - admin.v4alpha.RoutesConfigDump route_config = 4; - - admin.v4alpha.ScopedRoutesConfigDump scoped_route_config = 5; - - admin.v4alpha.EndpointsConfigDump endpoint_config = 6; - } -} - -// All xds configs for a particular client. -message ClientConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.status.v3.ClientConfig"; - - // GenericXdsConfig is used to specify the config status and the dump - // of any xDS resource identified by their type URL. It is the generalized - // version of the now deprecated ListenersConfigDump, ClustersConfigDump etc - // [#next-free-field: 10] - message GenericXdsConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.status.v3.ClientConfig.GenericXdsConfig"; - - // Type_url represents the fully qualified name of xDS resource type - // like envoy.v3.Cluster, envoy.v3.ClusterLoadAssignment etc. - string type_url = 1; - - // Name of the xDS resource - string name = 2; - - // This is the :ref:`version_info ` - // in the last processed xDS discovery response. If there are only - // static bootstrap listeners, this field will be "" - string version_info = 3; - - // The xDS resource config. Actual content depends on the type - google.protobuf.Any xds_config = 4; - - // Timestamp when the xDS resource was last updated - google.protobuf.Timestamp last_updated = 5; - - // Per xDS resource config status. It is generated by management servers. - // It will not be present if the CSDS server is an xDS client. - ConfigStatus config_status = 6; - - // Per xDS resource status from the view of a xDS client - admin.v4alpha.ClientResourceStatus client_status = 7; - - // Set if the last update failed, cleared after the next successful - // update. 
The *error_state* field contains the rejected version of - // this particular resource along with the reason and timestamp. For - // successfully updated or acknowledged resource, this field should - // be empty. - // [#not-implemented-hide:] - admin.v4alpha.UpdateFailureState error_state = 8; - - // Is static resource is true if it is specified in the config supplied - // through the file at the startup. - bool is_static_resource = 9; - } - - reserved 2; - - reserved "xds_config"; - - // Node for a particular client. - config.core.v4alpha.Node node = 1; - - // Represents generic xDS config and the exact config structure depends on - // the type URL (like Cluster if it is CDS) - repeated GenericXdsConfig generic_xds_configs = 3; -} - -message ClientStatusResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.status.v3.ClientStatusResponse"; - - // Client configs for the clients specified in the ClientStatusRequest. - repeated ClientConfig config = 1; -} diff --git a/api/envoy/service/tap/v4alpha/BUILD b/api/envoy/service/tap/v4alpha/BUILD deleted file mode 100644 index cb89a6907d9ab..0000000000000 --- a/api/envoy/service/tap/v4alpha/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/data/tap/v3:pkg", - "//envoy/service/tap/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/service/tap/v4alpha/tap.proto b/api/envoy/service/tap/v4alpha/tap.proto deleted file mode 100644 index 4ef38d1bae983..0000000000000 --- a/api/envoy/service/tap/v4alpha/tap.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto3"; - -package envoy.service.tap.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/data/tap/v3/wrapper.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.tap.v4alpha"; -option java_outer_classname = "TapProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Tap Sink Service] - -// [#not-implemented-hide:] A tap service to receive incoming taps. Envoy will call -// StreamTaps to deliver captured taps to the server -service TapSinkService { - // Envoy will connect and send StreamTapsRequest messages forever. It does not expect any - // response to be sent as nothing would be done in the case of failure. The server should - // disconnect if it expects Envoy to reconnect. - rpc StreamTaps(stream StreamTapsRequest) returns (StreamTapsResponse) { - } -} - -// [#not-implemented-hide:] Stream message for the Tap API. Envoy will open a stream to the server -// and stream taps without ever expecting a response. 
-message StreamTapsRequest { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.tap.v3.StreamTapsRequest"; - - message Identifier { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.tap.v3.StreamTapsRequest.Identifier"; - - // The node sending taps over the stream. - config.core.v4alpha.Node node = 1 [(validate.rules).message = {required: true}]; - - // The opaque identifier that was set in the :ref:`output config - // `. - string tap_id = 2; - } - - // Identifier data effectively is a structured metadata. As a performance optimization this will - // only be sent in the first message on the stream. - Identifier identifier = 1; - - // The trace id. this can be used to merge together a streaming trace. Note that the trace_id - // is not guaranteed to be spatially or temporally unique. - uint64 trace_id = 2; - - // The trace data. - data.tap.v3.TraceWrapper trace = 3; -} - -// [#not-implemented-hide:] -message StreamTapsResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.tap.v3.StreamTapsResponse"; -} diff --git a/api/envoy/service/trace/v4alpha/BUILD b/api/envoy/service/trace/v4alpha/BUILD deleted file mode 100644 index df379cbe9d5da..0000000000000 --- a/api/envoy/service/trace/v4alpha/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/service/trace/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@opencensus_proto//opencensus/proto/trace/v1:trace_proto", - ], -) diff --git a/api/envoy/service/trace/v4alpha/trace_service.proto b/api/envoy/service/trace/v4alpha/trace_service.proto deleted file mode 100644 index 4cfdbbe576df9..0000000000000 --- a/api/envoy/service/trace/v4alpha/trace_service.proto +++ /dev/null @@ -1,55 +0,0 @@ -syntax = "proto3"; - -package envoy.service.trace.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; - -import "opencensus/proto/trace/v1/trace.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.trace.v4alpha"; -option java_outer_classname = "TraceServiceProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Trace service] - -// Service for streaming traces to server that consumes the trace data. It -// uses OpenCensus data model as a standard to represent trace information. -service TraceService { - // Envoy will connect and send StreamTracesMessage messages forever. It does - // not expect any response to be sent as nothing would be done in the case - // of failure. 
- rpc StreamTraces(stream StreamTracesMessage) returns (StreamTracesResponse) { - } -} - -message StreamTracesResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.trace.v3.StreamTracesResponse"; -} - -message StreamTracesMessage { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.trace.v3.StreamTracesMessage"; - - message Identifier { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.trace.v3.StreamTracesMessage.Identifier"; - - // The node sending the access log messages over the stream. - config.core.v4alpha.Node node = 1 [(validate.rules).message = {required: true}]; - } - - // Identifier data effectively is a structured metadata. - // As a performance optimization this will only be sent in the first message - // on the stream. - Identifier identifier = 1; - - // A list of Span entries - repeated opencensus.proto.trace.v1.Span spans = 2; -} diff --git a/api/envoy/type/matcher/v3/metadata.proto b/api/envoy/type/matcher/v3/metadata.proto index 68710dc718546..de19a2f34dbd1 100644 --- a/api/envoy/type/matcher/v3/metadata.proto +++ b/api/envoy/type/matcher/v3/metadata.proto @@ -101,4 +101,7 @@ message MetadataMatcher { // The MetadataMatcher is matched if the value retrieved by path is matched to this value. ValueMatcher value = 3 [(validate.rules).message = {required: true}]; + + // If true, the match result will be inverted. + bool invert = 4; } diff --git a/api/envoy/type/matcher/v4alpha/BUILD b/api/envoy/type/matcher/v4alpha/BUILD deleted file mode 100644 index 0d4a45d002ced..0000000000000 --- a/api/envoy/type/matcher/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/type/matcher/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/api/envoy/type/matcher/v4alpha/http_inputs.proto b/api/envoy/type/matcher/v4alpha/http_inputs.proto deleted file mode 100644 index bd7758ad53fbf..0000000000000 --- a/api/envoy/type/matcher/v4alpha/http_inputs.proto +++ /dev/null @@ -1,70 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v4alpha; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; -option java_outer_classname = "HttpInputsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Common HTTP Inputs] - -// Match input indicates that matching should be done on a specific request header. -// The resulting input string will be all headers for the given key joined by a comma, -// e.g. if the request contains two 'foo' headers with value 'bar' and 'baz', the input -// string will be 'bar,baz'. -// [#comment:TODO(snowp): Link to unified matching docs.] -message HttpRequestHeaderMatchInput { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.v3.HttpRequestHeaderMatchInput"; - - // The request header to match on. - string header_name = 1 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; -} - -// Match input indicates that matching should be done on a specific request trailer. -// The resulting input string will be all headers for the given key joined by a comma, -// e.g. if the request contains two 'foo' headers with value 'bar' and 'baz', the input -// string will be 'bar,baz'. 
-// [#comment:TODO(snowp): Link to unified matching docs.] -message HttpRequestTrailerMatchInput { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.v3.HttpRequestTrailerMatchInput"; - - // The request trailer to match on. - string header_name = 1 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; -} - -// Match input indicating that matching should be done on a specific response header. -// The resulting input string will be all headers for the given key joined by a comma, -// e.g. if the response contains two 'foo' headers with value 'bar' and 'baz', the input -// string will be 'bar,baz'. -// [#comment:TODO(snowp): Link to unified matching docs.] -message HttpResponseHeaderMatchInput { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.v3.HttpResponseHeaderMatchInput"; - - // The response header to match on. - string header_name = 1 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; -} - -// Match input indicates that matching should be done on a specific response trailer. -// The resulting input string will be all headers for the given key joined by a comma, -// e.g. if the request contains two 'foo' headers with value 'bar' and 'baz', the input -// string will be 'bar,baz'. -// [#comment:TODO(snowp): Link to unified matching docs.] -message HttpResponseTrailerMatchInput { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.v3.HttpResponseTrailerMatchInput"; - - // The response trailer to match on. 
- string header_name = 1 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; -} diff --git a/api/envoy/type/matcher/v4alpha/metadata.proto b/api/envoy/type/matcher/v4alpha/metadata.proto deleted file mode 100644 index e61ba2754337b..0000000000000 --- a/api/envoy/type/matcher/v4alpha/metadata.proto +++ /dev/null @@ -1,105 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v4alpha; - -import "envoy/type/matcher/v4alpha/value.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; -option java_outer_classname = "MetadataProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Metadata matcher] - -// MetadataMatcher provides a general interface to check if a given value is matched in -// :ref:`Metadata `. It uses `filter` and `path` to retrieve the value -// from the Metadata and then check if it's matched to the specified value. -// -// For example, for the following Metadata: -// -// .. code-block:: yaml -// -// filter_metadata: -// envoy.filters.http.rbac: -// fields: -// a: -// struct_value: -// fields: -// b: -// struct_value: -// fields: -// c: -// string_value: pro -// t: -// list_value: -// values: -// - string_value: m -// - string_value: n -// -// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value "pro" -// from the Metadata which is matched to the specified prefix match. -// -// .. code-block:: yaml -// -// filter: envoy.filters.http.rbac -// path: -// - key: a -// - key: b -// - key: c -// value: -// string_match: -// prefix: pr -// -// The following MetadataMatcher is matched as the code will match one of the string values in the -// list at the path [a, t]. -// -// .. 
code-block:: yaml -// -// filter: envoy.filters.http.rbac -// path: -// - key: a -// - key: t -// value: -// list_match: -// one_of: -// string_match: -// exact: m -// -// An example use of MetadataMatcher is specifying additional metadata in envoy.filters.http.rbac to -// enforce access control based on dynamic metadata in a request. See :ref:`Permission -// ` and :ref:`Principal -// `. - -// [#next-major-version: MetadataMatcher should use StructMatcher] -message MetadataMatcher { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.v3.MetadataMatcher"; - - // Specifies the segment in a path to retrieve value from Metadata. - // Note: Currently it's not supported to retrieve a value from a list in Metadata. This means that - // if the segment key refers to a list, it has to be the last segment in a path. - message PathSegment { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.v3.MetadataMatcher.PathSegment"; - - oneof segment { - option (validate.required) = true; - - // If specified, use the key to retrieve the value in a Struct. - string key = 1 [(validate.rules).string = {min_len: 1}]; - } - } - - // The filter name to retrieve the Struct from the Metadata. - string filter = 1 [(validate.rules).string = {min_len: 1}]; - - // The path to retrieve the Value from the Struct. - repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; - - // The MetadataMatcher is matched if the value retrieved by path is matched to this value. 
- ValueMatcher value = 3 [(validate.rules).message = {required: true}]; -} diff --git a/api/envoy/type/matcher/v4alpha/node.proto b/api/envoy/type/matcher/v4alpha/node.proto deleted file mode 100644 index a74bf808f05ae..0000000000000 --- a/api/envoy/type/matcher/v4alpha/node.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v4alpha; - -import "envoy/type/matcher/v4alpha/string.proto"; -import "envoy/type/matcher/v4alpha/struct.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; -option java_outer_classname = "NodeProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Node matcher] - -// Specifies the way to match a Node. -// The match follows AND semantics. -message NodeMatcher { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.NodeMatcher"; - - // Specifies match criteria on the node id. - StringMatcher node_id = 1; - - // Specifies match criteria on the node metadata. 
- repeated StructMatcher node_metadatas = 2; -} diff --git a/api/envoy/type/matcher/v4alpha/number.proto b/api/envoy/type/matcher/v4alpha/number.proto deleted file mode 100644 index b168af19ab50c..0000000000000 --- a/api/envoy/type/matcher/v4alpha/number.proto +++ /dev/null @@ -1,33 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v4alpha; - -import "envoy/type/v3/range.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; -option java_outer_classname = "NumberProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Number matcher] - -// Specifies the way to match a double value. -message DoubleMatcher { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.v3.DoubleMatcher"; - - oneof match_pattern { - option (validate.required) = true; - - // If specified, the input double value must be in the range specified here. - // Note: The range is using half-open interval semantics [start, end). - v3.DoubleRange range = 1; - - // If specified, the input double value must be equal to the value specified here. 
- double exact = 2; - } -} diff --git a/api/envoy/type/matcher/v4alpha/path.proto b/api/envoy/type/matcher/v4alpha/path.proto deleted file mode 100644 index 9150939bf2eed..0000000000000 --- a/api/envoy/type/matcher/v4alpha/path.proto +++ /dev/null @@ -1,30 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v4alpha; - -import "envoy/type/matcher/v4alpha/string.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; -option java_outer_classname = "PathProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Path matcher] - -// Specifies the way to match a path on HTTP request. -message PathMatcher { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.PathMatcher"; - - oneof rule { - option (validate.required) = true; - - // The `path` must match the URL path portion of the :path header. The query and fragment - // string (if present) are removed in the URL path portion. - // For example, the path */data* will match the *:path* header */data#fragment?param=value*. 
- StringMatcher path = 1 [(validate.rules).message = {required: true}]; - } -} diff --git a/api/envoy/type/matcher/v4alpha/regex.proto b/api/envoy/type/matcher/v4alpha/regex.proto deleted file mode 100644 index 537635ec87d04..0000000000000 --- a/api/envoy/type/matcher/v4alpha/regex.proto +++ /dev/null @@ -1,82 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v4alpha; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; -option java_outer_classname = "RegexProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Regex matcher] - -// A regex matcher designed for safety when used with untrusted input. -message RegexMatcher { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.RegexMatcher"; - - // Google's `RE2 `_ regex engine. The regex string must adhere to - // the documented `syntax `_. The engine is designed - // to complete execution in linear time as well as limit the amount of memory used. - // - // Envoy supports program size checking via runtime. The runtime keys `re2.max_program_size.error_level` - // and `re2.max_program_size.warn_level` can be set to integers as the maximum program size or - // complexity that a compiled regex can have before an exception is thrown or a warning is - // logged, respectively. `re2.max_program_size.error_level` defaults to 100, and - // `re2.max_program_size.warn_level` has no default if unset (will not check/log a warning). - // - // Envoy emits two stats for tracking the program size of regexes: the histogram `re2.program_size`, - // which records the program size, and the counter `re2.exceeded_warn_level`, which is incremented - // each time the program size exceeds the warn level threshold. 
- message GoogleRE2 { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.v3.RegexMatcher.GoogleRE2"; - - reserved 1; - - reserved "max_program_size"; - } - - oneof engine_type { - option (validate.required) = true; - - // Google's RE2 regex engine. - GoogleRE2 google_re2 = 1 [(validate.rules).message = {required: true}]; - } - - // The regex match string. The string must be supported by the configured engine. - string regex = 2 [(validate.rules).string = {min_len: 1}]; -} - -// Describes how to match a string and then produce a new string using a regular -// expression and a substitution string. -message RegexMatchAndSubstitute { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.v3.RegexMatchAndSubstitute"; - - // The regular expression used to find portions of a string (hereafter called - // the "subject string") that should be replaced. When a new string is - // produced during the substitution operation, the new string is initially - // the same as the subject string, but then all matches in the subject string - // are replaced by the substitution string. If replacing all matches isn't - // desired, regular expression anchors can be used to ensure a single match, - // so as to replace just one occurrence of a pattern. Capture groups can be - // used in the pattern to extract portions of the subject string, and then - // referenced in the substitution string. - RegexMatcher pattern = 1 [(validate.rules).message = {required: true}]; - - // The string that should be substituted into matching portions of the - // subject string during a substitution operation to produce a new string. - // Capture groups in the pattern can be referenced in the substitution - // string. Note, however, that the syntax for referring to capture groups is - // defined by the chosen regular expression engine. 
Google's `RE2 - // `_ regular expression engine uses a - // backslash followed by the capture group number to denote a numbered - // capture group. E.g., ``\1`` refers to capture group 1, and ``\2`` refers - // to capture group 2. - string substitution = 2; -} diff --git a/api/envoy/type/matcher/v4alpha/string.proto b/api/envoy/type/matcher/v4alpha/string.proto deleted file mode 100644 index f9fa48cd31956..0000000000000 --- a/api/envoy/type/matcher/v4alpha/string.proto +++ /dev/null @@ -1,78 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v4alpha; - -import "envoy/type/matcher/v4alpha/regex.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; -option java_outer_classname = "StringProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: String matcher] - -// Specifies the way to match a string. -// [#next-free-field: 8] -message StringMatcher { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.v3.StringMatcher"; - - reserved 4; - - reserved "regex"; - - oneof match_pattern { - option (validate.required) = true; - - // The input string must match exactly the string specified here. - // - // Examples: - // - // * *abc* only matches the value *abc*. - string exact = 1; - - // The input string must have the prefix specified here. - // Note: empty prefix is not allowed, please use regex instead. - // - // Examples: - // - // * *abc* matches the value *abc.xyz* - string prefix = 2 [(validate.rules).string = {min_len: 1}]; - - // The input string must have the suffix specified here. - // Note: empty prefix is not allowed, please use regex instead. 
- // - // Examples: - // - // * *abc* matches the value *xyz.abc* - string suffix = 3 [(validate.rules).string = {min_len: 1}]; - - // The input string must match the regular expression specified here. - RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}]; - - // The input string must have the substring specified here. - // Note: empty contains match is not allowed, please use regex instead. - // - // Examples: - // - // * *abc* matches the value *xyz.abc.def* - string contains = 7 [(validate.rules).string = {min_len: 1}]; - } - - // If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. This - // has no effect for the safe_regex match. - // For example, the matcher *data* will match both input string *Data* and *data* if set to true. - bool ignore_case = 6; -} - -// Specifies a list of ways to match a string. -message ListStringMatcher { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.v3.ListStringMatcher"; - - repeated StringMatcher patterns = 1 [(validate.rules).repeated = {min_items: 1}]; -} diff --git a/api/envoy/type/matcher/v4alpha/struct.proto b/api/envoy/type/matcher/v4alpha/struct.proto deleted file mode 100644 index 328ac555bd810..0000000000000 --- a/api/envoy/type/matcher/v4alpha/struct.proto +++ /dev/null @@ -1,91 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v4alpha; - -import "envoy/type/matcher/v4alpha/value.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; -option java_outer_classname = "StructProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Struct matcher] - -// StructMatcher provides a general interface to check if a given value is matched in -// google.protobuf.Struct. 
It uses `path` to retrieve the value -// from the struct and then check if it's matched to the specified value. -// -// For example, for the following Struct: -// -// .. code-block:: yaml -// -// fields: -// a: -// struct_value: -// fields: -// b: -// struct_value: -// fields: -// c: -// string_value: pro -// t: -// list_value: -// values: -// - string_value: m -// - string_value: n -// -// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value "pro" -// from the Metadata which is matched to the specified prefix match. -// -// .. code-block:: yaml -// -// path: -// - key: a -// - key: b -// - key: c -// value: -// string_match: -// prefix: pr -// -// The following StructMatcher is matched as the code will match one of the string values in the -// list at the path [a, t]. -// -// .. code-block:: yaml -// -// path: -// - key: a -// - key: t -// value: -// list_match: -// one_of: -// string_match: -// exact: m -// -// An example use of StructMatcher is to match metadata in envoy.v*.core.Node. -message StructMatcher { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.v3.StructMatcher"; - - // Specifies the segment in a path to retrieve value from Struct. - message PathSegment { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.v3.StructMatcher.PathSegment"; - - oneof segment { - option (validate.required) = true; - - // If specified, use the key to retrieve the value in a Struct. - string key = 1 [(validate.rules).string = {min_len: 1}]; - } - } - - // The path to retrieve the Value from the Struct. - repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; - - // The StructMatcher is matched if the value retrieved by path is matched to this value. 
- ValueMatcher value = 3 [(validate.rules).message = {required: true}]; -} diff --git a/api/envoy/type/matcher/v4alpha/value.proto b/api/envoy/type/matcher/v4alpha/value.proto deleted file mode 100644 index 6e509d4601099..0000000000000 --- a/api/envoy/type/matcher/v4alpha/value.proto +++ /dev/null @@ -1,71 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v4alpha; - -import "envoy/type/matcher/v4alpha/number.proto"; -import "envoy/type/matcher/v4alpha/string.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; -option java_outer_classname = "ValueProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Value matcher] - -// Specifies the way to match a ProtobufWkt::Value. Primitive values and ListValue are supported. -// StructValue is not supported and is always not matched. -// [#next-free-field: 7] -message ValueMatcher { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.ValueMatcher"; - - // NullMatch is an empty message to specify a null value. - message NullMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.v3.ValueMatcher.NullMatch"; - } - - // Specifies how to match a value. - oneof match_pattern { - option (validate.required) = true; - - // If specified, a match occurs if and only if the target value is a NullValue. - NullMatch null_match = 1; - - // If specified, a match occurs if and only if the target value is a double value and is - // matched to this field. - DoubleMatcher double_match = 2; - - // If specified, a match occurs if and only if the target value is a string value and is - // matched to this field. 
- StringMatcher string_match = 3; - - // If specified, a match occurs if and only if the target value is a bool value and is equal - // to this field. - bool bool_match = 4; - - // If specified, value match will be performed based on whether the path is referring to a - // valid primitive value in the metadata. If the path is referring to a non-primitive value, - // the result is always not matched. - bool present_match = 5; - - // If specified, a match occurs if and only if the target value is a list value and - // is matched to this field. - ListMatcher list_match = 6; - } -} - -// Specifies the way to match a list value. -message ListMatcher { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.ListMatcher"; - - oneof match_pattern { - option (validate.required) = true; - - // If specified, at least one of the values in the list must match the value specified. - ValueMatcher one_of = 1; - } -} diff --git a/api/test/validate/BUILD b/api/test/validate/BUILD index be7374cfc1fb0..79b6d6d91da23 100644 --- a/api/test/validate/BUILD +++ b/api/test/validate/BUILD @@ -6,12 +6,10 @@ api_cc_test( name = "pgv_test", srcs = ["pgv_test.cc"], deps = [ - "@envoy_api//envoy/api/v2:pkg_cc_proto", - "@envoy_api//envoy/api/v2/core:pkg_cc_proto", - "@envoy_api//envoy/api/v2/listener:pkg_cc_proto", - "@envoy_api//envoy/api/v2/route:pkg_cc_proto", - "@envoy_api//envoy/config/bootstrap/v2:pkg_cc_proto", - "@envoy_api//envoy/config/filter/accesslog/v2:pkg_cc_proto", + "@envoy_api//envoy/config/accesslog/v3:pkg_cc_proto", + "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/compression/gzip/decompressor/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/http/buffer/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/http/fault/v3:pkg_cc_proto", @@ -22,11 +20,11 @@ api_cc_test( 
"@envoy_api//envoy/extensions/filters/http/ip_tagging/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/http/lua/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/http/router/v3:pkg_cc_proto", - "@envoy_api//envoy/extensions/filters/http/squash/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/mongo_proxy/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/redis_proxy/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/tcp_proxy/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/health_checkers/redis/v3:pkg_cc_proto", + "@envoy_api//envoy/service/listener/v3:pkg_cc_proto", ], ) diff --git a/api/test/validate/pgv_test.cc b/api/test/validate/pgv_test.cc index d89f99cbc31c2..75f7692ba3e69 100644 --- a/api/test/validate/pgv_test.cc +++ b/api/test/validate/pgv_test.cc @@ -4,13 +4,12 @@ // We don't use all the headers in the test below, but including them anyway as // a cheap way to get some C++ compiler sanity checking. 
-#include "envoy/api/v2/cluster.pb.validate.h" -#include "envoy/api/v2/endpoint.pb.validate.h" -#include "envoy/api/v2/listener.pb.validate.h" -#include "envoy/api/v2/route.pb.validate.h" -#include "envoy/api/v2/core/protocol.pb.validate.h" -#include "envoy/config/filter/accesslog/v2/accesslog.pb.validate.h" -#include "envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.pb.validate.h" +#include "envoy/config/cluster/v3/cluster.pb.validate.h" +#include "envoy/config/endpoint/v3/endpoint.pb.validate.h" +#include "envoy/config/listener/v3/listener.pb.validate.h" +#include "envoy/config/route/v3/route.pb.validate.h" +#include "envoy/config/core/v3/protocol.pb.validate.h" +#include "envoy/config/accesslog/v3/accesslog.pb.validate.h" #include "envoy/extensions/compression/gzip/decompressor/v3/gzip.pb.validate.h" #include "envoy/extensions/filters/http/buffer/v3/buffer.pb.validate.h" #include "envoy/extensions/filters/http/fault/v3/fault.pb.validate.h" @@ -20,15 +19,12 @@ #include "envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.pb.validate.h" #include "envoy/extensions/filters/http/lua/v3/lua.pb.validate.h" #include "envoy/extensions/filters/http/router/v3/router.pb.validate.h" -#include "envoy/extensions/filters/http/squash/v3/squash.pb.validate.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.validate.h" #include "envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.pb.validate.h" #include "envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.validate.h" #include "envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.pb.validate.h" #include "envoy/extensions/health_checkers/redis/v3/redis.pb.validate.h" -#include "envoy/api/v2/listener/listener.pb.validate.h" -#include "envoy/api/v2/route/route.pb.validate.h" -#include "envoy/config/bootstrap/v2/bootstrap.pb.validate.h" +#include "envoy/config/bootstrap/v3/bootstrap.pb.validate.h" #include 
"google/protobuf/text_format.h" @@ -53,8 +49,8 @@ template struct TestCase { // Basic protoc-gen-validate C++ validation header inclusion and Validate calls // from data plane API. -int main(int argc, char* argv[]) { - envoy::config::bootstrap::v2::Bootstrap invalid_bootstrap; +int main(int /*argc*/, char* /*argv*/[]) { + envoy::config::bootstrap::v3::Bootstrap invalid_bootstrap; invalid_bootstrap.mutable_static_resources()->add_clusters(); // This is a baseline test of the validation features we care about. It's // probably not worth adding in every filter and field that we want to valid @@ -68,12 +64,12 @@ int main(int argc, char* argv[]) { address { pipe { path: "/" } } } )EOF"; - envoy::config::bootstrap::v2::Bootstrap valid_bootstrap; + envoy::config::bootstrap::v3::Bootstrap valid_bootstrap; if (!google::protobuf::TextFormat::ParseFromString(valid_bootstrap_text, &valid_bootstrap)) { std::cerr << "Unable to parse text proto: " << valid_bootstrap_text << std::endl; exit(EXIT_FAILURE); } - TestCase{invalid_bootstrap, valid_bootstrap}.run(); + TestCase{invalid_bootstrap, valid_bootstrap}.run(); exit(EXIT_SUCCESS); } diff --git a/api/versioning/BUILD b/api/versioning/BUILD index 51bc63183a1c4..52cb8c09eaf81 100644 --- a/api/versioning/BUILD +++ b/api/versioning/BUILD @@ -9,6 +9,12 @@ proto_library( name = "active_protos", visibility = ["//visibility:public"], deps = [ + "//contrib/envoy/extensions/filters/http/squash/v3:pkg", + "//contrib/envoy/extensions/filters/http/sxg/v3alpha:pkg", + "//contrib/envoy/extensions/filters/network/kafka_broker/v3:pkg", + "//contrib/envoy/extensions/filters/network/mysql_proxy/v3:pkg", + "//contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha:pkg", + "//contrib/envoy/extensions/filters/network/rocketmq_proxy/v3:pkg", "//envoy/admin/v3:pkg", "//envoy/config/accesslog/v3:pkg", "//envoy/config/bootstrap/v3:pkg", @@ -46,6 +52,7 @@ proto_library( "//envoy/extensions/clusters/dynamic_forward_proxy/v3:pkg", 
"//envoy/extensions/clusters/redis/v3:pkg", "//envoy/extensions/common/dynamic_forward_proxy/v3:pkg", + "//envoy/extensions/common/key_value/v3:pkg", "//envoy/extensions/common/matching/v3:pkg", "//envoy/extensions/common/ratelimit/v3:pkg", "//envoy/extensions/common/tap/v3:pkg", @@ -95,7 +102,6 @@ proto_library( "//envoy/extensions/filters/http/rbac/v3:pkg", "//envoy/extensions/filters/http/router/v3:pkg", "//envoy/extensions/filters/http/set_metadata/v3:pkg", - "//envoy/extensions/filters/http/squash/v3:pkg", "//envoy/extensions/filters/http/tap/v3:pkg", "//envoy/extensions/filters/http/wasm/v3:pkg", "//envoy/extensions/filters/listener/http_inspector/v3:pkg", @@ -111,15 +117,11 @@ proto_library( "//envoy/extensions/filters/network/echo/v3:pkg", "//envoy/extensions/filters/network/ext_authz/v3:pkg", "//envoy/extensions/filters/network/http_connection_manager/v3:pkg", - "//envoy/extensions/filters/network/kafka_broker/v3:pkg", "//envoy/extensions/filters/network/local_ratelimit/v3:pkg", "//envoy/extensions/filters/network/mongo_proxy/v3:pkg", - "//envoy/extensions/filters/network/mysql_proxy/v3:pkg", - "//envoy/extensions/filters/network/postgres_proxy/v3alpha:pkg", "//envoy/extensions/filters/network/ratelimit/v3:pkg", "//envoy/extensions/filters/network/rbac/v3:pkg", "//envoy/extensions/filters/network/redis_proxy/v3:pkg", - "//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg", "//envoy/extensions/filters/network/sni_cluster/v3:pkg", "//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha:pkg", "//envoy/extensions/filters/network/tcp_proxy/v3:pkg", @@ -130,6 +132,7 @@ proto_library( "//envoy/extensions/filters/network/zookeeper_proxy/v3:pkg", "//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg", "//envoy/extensions/filters/udp/udp_proxy/v3:pkg", + "//envoy/extensions/formatter/metadata/v3:pkg", "//envoy/extensions/formatter/req_without_query/v3:pkg", "//envoy/extensions/health_checkers/redis/v3:pkg", 
"//envoy/extensions/http/header_formatters/preserve_case/v3:pkg", @@ -138,6 +141,7 @@ proto_library( "//envoy/extensions/internal_redirect/allow_listed_routes/v3:pkg", "//envoy/extensions/internal_redirect/previous_routes/v3:pkg", "//envoy/extensions/internal_redirect/safe_cross_scheme/v3:pkg", + "//envoy/extensions/key_value/file_based/v3:pkg", "//envoy/extensions/matching/common_inputs/environment_variable/v3:pkg", "//envoy/extensions/matching/input_matchers/consistent_hashing/v3:pkg", "//envoy/extensions/matching/input_matchers/ip/v3:pkg", diff --git a/bazel/README.md b/bazel/README.md index b8c2b0a03f8df..cf575fa58c197 100644 --- a/bazel/README.md +++ b/bazel/README.md @@ -668,7 +668,10 @@ The following optional features can be enabled on the Bazel build command-line: Envoy uses a modular build which allows extensions to be removed if they are not needed or desired. Extensions that can be removed are contained in -[extensions_build_config.bzl](../source/extensions/extensions_build_config.bzl). +[extensions_build_config.bzl](../source/extensions/extensions_build_config.bzl). Contrib build +extensions are contained in [contrib_build_config.bzl](../contrib/contrib_build_config.bzl). Note +that contrib extensions are only included by default when building the contrib executable and in +the default contrib images pushed to Docker Hub. The extensions disabled by default can be enabled by adding the following parameter to Bazel, for example to enable `envoy.filters.http.kill_request` extension, add `--//source/extensions/filters/http/kill_request:enabled`. @@ -681,6 +684,13 @@ If you're building from a custom build repository, the parameters need to prefix You may persist those options in `user.bazelrc` in Envoy repo or your `.bazelrc`. +Contrib extensions can be enabled and disabled similarly to above when building the contrib +executable. 
For example: + +`bazel build //contrib/exe:envoy-static --//contrib/squash/filters/http/source:enabled=false` + +Will disable the squash extension when building the contrib executable. + ## Customize extension build config You can also use the following procedure to customize the extensions for your build: @@ -719,6 +729,11 @@ local_repository( ... ``` +When performing custom builds, it is acceptable to include contrib extensions as well. This can +be done by including the desired Bazel paths from [contrib_build_config.bzl](../contrib/contrib_build_config.bzl) +into the overridden `extensions_build_config.bzl`. (There is no need to specifically perform +a contrib build to include a contrib extension.) + ## Extra extensions If you are building your own Envoy extensions or custom Envoy builds and encounter visibility diff --git a/bazel/api_binding.bzl b/bazel/api_binding.bzl index 550685f6299a4..362e1803a1ef8 100644 --- a/bazel/api_binding.bzl +++ b/bazel/api_binding.bzl @@ -8,6 +8,8 @@ def _default_envoy_api_impl(ctx): "test", "tools", "versioning", + "contrib", + "buf.yaml", ] for d in api_dirs: ctx.symlink(ctx.path(ctx.attr.envoy_root).dirname.get_child(ctx.attr.reldir).get_child(d), d) diff --git a/bazel/envoy_build_system.bzl b/bazel/envoy_build_system.bzl index a0463ab770d04..4d671ab9562fa 100644 --- a/bazel/envoy_build_system.bzl +++ b/bazel/envoy_build_system.bzl @@ -6,6 +6,7 @@ load(":envoy_internal.bzl", "envoy_external_dep_path") load( ":envoy_library.bzl", _envoy_basic_cc_library = "envoy_basic_cc_library", + _envoy_cc_contrib_extension = "envoy_cc_contrib_extension", _envoy_cc_extension = "envoy_cc_extension", _envoy_cc_library = "envoy_cc_library", _envoy_cc_linux_library = "envoy_cc_linux_library", @@ -50,8 +51,8 @@ load("@bazel_skylib//rules:common_settings.bzl", "bool_flag") def envoy_package(): native.package(default_visibility = ["//visibility:public"]) -def envoy_extension_package(enabled_default = True): - native.package(default_visibility = 
EXTENSION_PACKAGE_VISIBILITY) +def envoy_extension_package(enabled_default = True, default_visibility = EXTENSION_PACKAGE_VISIBILITY): + native.package(default_visibility = default_visibility) bool_flag( name = "enabled", @@ -63,6 +64,9 @@ def envoy_extension_package(enabled_default = True): flag_values = {":enabled": "True"}, ) +def envoy_contrib_package(): + envoy_extension_package(default_visibility = ["//:contrib_library"]) + # A genrule variant that can output a directory. This is useful when doing things like # generating a fuzz corpus mechanically. def _envoy_directory_genrule_impl(ctx): @@ -220,6 +224,7 @@ envoy_cc_binary = _envoy_cc_binary # Library wrappers (from envoy_library.bzl) envoy_basic_cc_library = _envoy_basic_cc_library envoy_cc_extension = _envoy_cc_extension +envoy_cc_contrib_extension = _envoy_cc_contrib_extension envoy_cc_library = _envoy_cc_library envoy_cc_linux_library = _envoy_cc_linux_library envoy_cc_posix_library = _envoy_cc_posix_library diff --git a/bazel/envoy_library.bzl b/bazel/envoy_library.bzl index c29f35b160c0c..ac74d1be29c96 100644 --- a/bazel/envoy_library.bzl +++ b/bazel/envoy_library.bzl @@ -71,6 +71,14 @@ def envoy_cc_extension( visibility = visibility, ) +def envoy_cc_contrib_extension( + name, + tags = [], + extra_visibility = [], + visibility = ["//:contrib_library"], + **kwargs): + envoy_cc_extension(name, tags, extra_visibility, visibility, **kwargs) + # Envoy C++ library targets should be specified with this function. 
def envoy_cc_library( name, diff --git a/bazel/envoy_test.bzl b/bazel/envoy_test.bzl index 7f45c554c54db..799e60154afc5 100644 --- a/bazel/envoy_test.bzl +++ b/bazel/envoy_test.bzl @@ -156,7 +156,8 @@ def envoy_cc_test( coverage = True, local = False, size = "medium", - flaky = False): + flaky = False, + env = {}): coverage_tags = tags + ([] if coverage else ["nocoverage"]) cc_test( @@ -180,6 +181,7 @@ def envoy_cc_test( shard_count = shard_count, size = size, flaky = flaky, + env = env, ) # Envoy C++ test related libraries (that want gtest, gmock) should be specified diff --git a/bazel/external/quiche.BUILD b/bazel/external/quiche.BUILD index e7a654c919231..f8a1079ac93cd 100644 --- a/bazel/external/quiche.BUILD +++ b/bazel/external/quiche.BUILD @@ -1038,6 +1038,7 @@ envoy_cc_library( deps = [ ":quic_platform_export", ":quiche_common_lib", + ":quiche_common_platform_default_quiche_platform_impl_export_lib", "@envoy//source/common/quic/platform:quic_platform_base_impl_lib", ], ) @@ -1155,7 +1156,10 @@ envoy_cc_test_library( hdrs = ["quiche/quic/platform/api/quic_test.h"], repository = "@envoy", tags = ["nofips"], - deps = ["@envoy//test/common/quic/platform:quic_platform_test_impl_lib"], + deps = [ + ":quiche_common_platform_test", + "@envoy//test/common/quic/platform:quic_platform_test_impl_lib", + ], ) envoy_cc_test_library( @@ -1758,6 +1762,26 @@ envoy_cc_library( deps = [":quic_core_time_lib"], ) +envoy_cc_library( + name = "quic_core_connection_context_lib", + srcs = [ + "quiche/quic/core/quic_connection_context.cc", + ], + hdrs = [ + "quiche/quic/core/quic_connection_context.h", + ], + copts = quiche_copts, + external_deps = [ + "abseil_str_format", + ], + repository = "@envoy", + tags = ["nofips"], + deps = [ + ":quic_platform_export", + ":quiche_common_platform", + ], +) + envoy_cc_library( name = "quic_core_connection_id_manager", srcs = ["quiche/quic/core/quic_connection_id_manager.cc"], @@ -1791,6 +1815,7 @@ envoy_cc_library( ":quic_core_bandwidth_lib", 
":quic_core_blocked_writer_interface_lib", ":quic_core_config_lib", + ":quic_core_connection_context_lib", ":quic_core_connection_id_manager", ":quic_core_connection_stats_lib", ":quic_core_crypto_crypto_handshake_lib", @@ -2251,7 +2276,6 @@ envoy_cc_library( ":quic_core_types_lib", ":quic_core_versions_lib", ":quic_platform_base", - ":quic_platform_mem_slice_span", ], ) @@ -3260,7 +3284,6 @@ envoy_cc_library( ":quic_core_utils_lib", ":quic_core_versions_lib", ":quic_platform", - ":quic_platform_mem_slice_span", ":quiche_common_text_utils_lib", ":spdy_core_protocol_lib", ], @@ -3314,7 +3337,6 @@ envoy_cc_library( ":quic_core_types_lib", ":quic_core_utils_lib", ":quic_platform_base", - ":quic_platform_mem_slice_span", ":quiche_common_circular_deque_lib", ], ) @@ -3972,6 +3994,7 @@ envoy_cc_library( "quiche/common/platform/api/quiche_flags.h", "quiche/common/platform/api/quiche_logging.h", "quiche/common/platform/api/quiche_prefetch.h", + "quiche/common/platform/api/quiche_thread_local.h", "quiche/common/platform/api/quiche_time_utils.h", ], repository = "@envoy", @@ -3987,7 +4010,9 @@ envoy_cc_library( envoy_cc_library( name = "quiche_common_platform_default_quiche_platform_impl_export_lib", hdrs = [ + "quiche/common/platform/default/quiche_platform_impl/quiche_containers_impl.h", "quiche/common/platform/default/quiche_platform_impl/quiche_export_impl.h", + "quiche/common/platform/default/quiche_platform_impl/quiche_thread_local_impl.h", ], repository = "@envoy", tags = ["nofips"], @@ -4140,32 +4165,17 @@ envoy_cc_test( ], ) -envoy_cc_library( - name = "quic_platform_mem_slice_span", - hdrs = [ - "quiche/quic/platform/api/quic_mem_slice_span.h", - ], - copts = quiche_copts, - repository = "@envoy", - tags = ["nofips"], - visibility = ["//visibility:public"], - deps = ["@envoy//source/common/quic/platform:quic_platform_mem_slice_span_impl_lib"], -) - -envoy_cc_test_library( - name = "quic_platform_test_mem_slice_vector_lib", - hdrs = 
["quiche/quic/platform/api/quic_test_mem_slice_vector.h"], - repository = "@envoy", - tags = ["nofips"], - deps = ["@envoy//test/common/quic/platform:quic_platform_test_mem_slice_vector_impl_lib"], -) - envoy_cc_library( name = "quic_platform_mem_slice_storage", + srcs = ["quiche/quic/platform/api/quic_mem_slice_storage.cc"], hdrs = ["quiche/quic/platform/api/quic_mem_slice_storage.h"], repository = "@envoy", visibility = ["//visibility:public"], - deps = ["@envoy//source/common/quic/platform:quic_platform_mem_slice_storage_impl_lib"], + deps = [ + ":quic_core_types_lib", + ":quic_core_utils_lib", + ":quic_platform_base", + ], ) envoy_cc_test( @@ -4184,7 +4194,6 @@ envoy_cc_test( envoy_cc_test( name = "quic_platform_api_test", srcs = [ - "quiche/quic/platform/api/quic_mem_slice_span_test.cc", "quiche/quic/platform/api/quic_mem_slice_storage_test.cc", "quiche/quic/platform/api/quic_mem_slice_test.cc", "quiche/quic/platform/api/quic_reference_counted_test.cc", @@ -4195,10 +4204,8 @@ envoy_cc_test( deps = [ ":quic_core_buffer_allocator_lib", ":quic_platform", - ":quic_platform_mem_slice_span", ":quic_platform_mem_slice_storage", ":quic_platform_test", - ":quic_platform_test_mem_slice_vector_lib", ], ) diff --git a/bazel/external/quiche.genrule_cmd b/bazel/external/quiche.genrule_cmd index 6719aa0f7227c..ed451e6d9e338 100644 --- a/bazel/external/quiche.genrule_cmd +++ b/bazel/external/quiche.genrule_cmd @@ -13,7 +13,7 @@ set -e # tree structure.) # Determine base directory of unmodified QUICHE source files. In practice, this -# ends up being "external/com_googlesource_quiche". +# ends up being "external/com_github_google_quiche". src_base_dir=$$(dirname $$(dirname $$(dirname $(rootpath quic/core/quic_constants.h)))) # sed commands to apply to each source file. @@ -73,7 +73,7 @@ EOF for src_file in $(SRCS); do # Extract relative path (e.g. "quic/core/quic_utils.cc") from full path in - # src_path (e.g. "external/com_googlesource_quiche/quic/core/quic_utils.cc"). 
+ # src_path (e.g. "external/com_github_google_quiche/quic/core/quic_utils.cc"). src_path="$${src_file#$$src_base_dir/}" # Map to output file with quiche/ base directory inserted in path. diff --git a/bazel/foreign_cc/BUILD b/bazel/foreign_cc/BUILD index b24046ea99c57..2c9b481282cb0 100644 --- a/bazel/foreign_cc/BUILD +++ b/bazel/foreign_cc/BUILD @@ -37,6 +37,32 @@ cc_library( ], ) +# Kafka client dependency used by Kafka-mesh filter. +# librdkafka build generates extra headers that need to be copied into source to get it to compile. +configure_make( + name = "librdkafka_build", + configure_in_place = True, + configure_options = ["--disable-ssl --disable-gssapi --disable-lz4-ext --disable-zstd && cp Makefile.config src/.. && cp config.h src/.."], + lib_source = "@edenhill_librdkafka//:all", + make_commands = [ + "make ARFLAGS='' libs install-subdirs", + ], + static_libraries = [ + "librdkafka.a", + "librdkafka++.a", + ], + tags = ["skip_on_windows"], + alwayslink = True, +) + +cc_library( + name = "librdkafka", + tags = ["skip_on_windows"], + deps = [ + "librdkafka_build", + ], +) + configure_make( name = "luajit", configure_command = "build.py", @@ -76,6 +102,24 @@ configure_make( tags = ["skip_on_windows"], ) +envoy_cmake_external( + name = "libsxg", + cache_entries = { + "CMAKE_BUILD_TYPE": "Release", + "SXG_BUILD_EXECUTABLES": "off", + "SXG_BUILD_SHARED": "off", + "SXG_BUILD_STATIC": "on", + "SXG_WITH_CERT_CHAIN": "off", + "RUN_TEST": "off", + "CMAKE_INSTALL_LIBDIR": "lib", + "CMAKE_TRY_COMPILE_TARGET_TYPE": "STATIC_LIBRARY", + }, + lib_source = "@com_github_google_libsxg//:all", + static_libraries = ["libsxg.a"], + tags = ["skip_on_windows"], + deps = ["@boringssl//:ssl"], +) + envoy_cmake_external( name = "ares", cache_entries = { @@ -92,7 +136,7 @@ envoy_cmake_external( "//conditions:default": [], }), postfix_script = select({ - "//bazel:windows_x86_64": "cp -L $EXT_BUILD_ROOT/external/com_github_c_ares_c_ares/src/lib/nameser.h $INSTALLDIR/include/nameser.h 
&& cp -L $EXT_BUILD_ROOT/external/com_github_c_ares_c_ares/include/ares_dns.h $INSTALLDIR/include/ares_dns.h", + "//bazel:windows_x86_64": "cp -L $EXT_BUILD_ROOT/external/com_github_c_ares_c_ares/src/lib/ares_nameser.h $INSTALLDIR/include/ares_nameser.h && cp -L $EXT_BUILD_ROOT/external/com_github_c_ares_c_ares/include/ares_dns.h $INSTALLDIR/include/ares_dns.h", "//conditions:default": "cp -L $EXT_BUILD_ROOT/external/com_github_c_ares_c_ares/include/ares_dns.h $INSTALLDIR/include/ares_dns.h", }), static_libraries = select({ diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index be758fa4cebfd..c53d62da1bbb6 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -8,6 +8,7 @@ load("@com_google_googleapis//:repository_rules.bzl", "switched_rules_by_languag PPC_SKIP_TARGETS = ["envoy.filters.http.lua"] WINDOWS_SKIP_TARGETS = [ + "envoy.filters.http.sxg", "envoy.tracers.dynamic_ot", "envoy.tracers.lightstep", "envoy.tracers.datadog", @@ -134,6 +135,7 @@ def envoy_dependencies(skip_targets = []): _com_github_google_benchmark() _com_github_google_jwt_verify() _com_github_google_libprotobuf_mutator() + _com_github_google_libsxg() _com_github_google_tcmalloc() _com_github_gperftools_gperftools() _com_github_grpc_grpc() @@ -155,7 +157,7 @@ def envoy_dependencies(skip_targets = []): _com_github_curl() _com_github_envoyproxy_sqlparser() _com_googlesource_chromium_v8() - _com_googlesource_quiche() + _com_github_google_quiche() _com_googlesource_googleurl() _com_lightstep_tracer_cpp() _io_opentracing_cpp() @@ -312,6 +314,17 @@ def _com_github_google_libprotobuf_mutator(): build_file = "@envoy//bazel/external:libprotobuf_mutator.BUILD", ) +def _com_github_google_libsxg(): + external_http_archive( + name = "com_github_google_libsxg", + build_file_content = BUILD_ALL_CONTENT, + ) + + native.bind( + name = "libsxg", + actual = "@envoy//bazel/foreign_cc:libsxg", + ) + def _com_github_jbeder_yaml_cpp(): external_http_archive( name = 
"com_github_jbeder_yaml_cpp", @@ -738,31 +751,31 @@ def _com_googlesource_chromium_v8(): actual = "@com_googlesource_chromium_v8//:wee8", ) -def _com_googlesource_quiche(): +def _com_github_google_quiche(): external_genrule_repository( - name = "com_googlesource_quiche", + name = "com_github_google_quiche", genrule_cmd_file = "@envoy//bazel/external:quiche.genrule_cmd", build_file = "@envoy//bazel/external:quiche.BUILD", ) native.bind( name = "quiche_common_platform", - actual = "@com_googlesource_quiche//:quiche_common_platform", + actual = "@com_github_google_quiche//:quiche_common_platform", ) native.bind( name = "quiche_http2_platform", - actual = "@com_googlesource_quiche//:http2_platform", + actual = "@com_github_google_quiche//:http2_platform", ) native.bind( name = "quiche_spdy_platform", - actual = "@com_googlesource_quiche//:spdy_platform", + actual = "@com_github_google_quiche//:spdy_platform", ) native.bind( name = "quiche_quic_platform", - actual = "@com_googlesource_quiche//:quic_platform", + actual = "@com_github_google_quiche//:quic_platform", ) native.bind( name = "quiche_quic_platform_base", - actual = "@com_googlesource_quiche//:quic_platform_base", + actual = "@com_github_google_quiche//:quic_platform_base", ) def _com_googlesource_googleurl(): @@ -1010,6 +1023,17 @@ filegroup( patches = ["@envoy//bazel/external:kafka_int32.patch"], ) + # This archive provides Kafka C/CPP client used by mesh filter to communicate with upstream + # Kafka clusters. + external_http_archive( + name = "edenhill_librdkafka", + build_file_content = BUILD_ALL_CONTENT, + ) + native.bind( + name = "librdkafka", + actual = "@envoy//bazel/foreign_cc:librdkafka", + ) + # This archive provides Kafka (and Zookeeper) binaries, that are used during Kafka integration # tests. 
external_http_archive( diff --git a/bazel/repositories_extra.bzl b/bazel/repositories_extra.bzl index e04a4ac41d7a5..6b9c483a6ea72 100644 --- a/bazel/repositories_extra.bzl +++ b/bazel/repositories_extra.bzl @@ -1,12 +1,9 @@ -load("@rules_python//python:repositories.bzl", "py_repositories") load("@rules_python//python:pip.bzl", "pip_install") load("@proxy_wasm_cpp_host//bazel/cargo:crates.bzl", "proxy_wasm_cpp_host_fetch_remote_crates") load("//bazel/external/cargo:crates.bzl", "raze_fetch_remote_crates") # Python dependencies. def _python_deps(): - py_repositories() - pip_install( name = "base_pip3", requirements = "@envoy//tools/base:requirements.txt", @@ -47,29 +44,24 @@ def _python_deps(): requirements = "@envoy//tools/docs:requirements.txt", extra_pip_args = ["--require-hashes"], ) - pip_install( - name = "docker_pip3", - requirements = "@envoy//tools/docker:requirements.txt", - extra_pip_args = ["--require-hashes"], - ) pip_install( name = "deps_pip3", requirements = "@envoy//tools/dependency:requirements.txt", extra_pip_args = ["--require-hashes"], ) pip_install( - name = "git_pip3", - requirements = "@envoy//tools/git:requirements.txt", + name = "distribution_pip3", + requirements = "@envoy//tools/distribution:requirements.txt", extra_pip_args = ["--require-hashes"], ) pip_install( - name = "gpg_pip3", - requirements = "@envoy//tools/gpg:requirements.txt", + name = "git_pip3", + requirements = "@envoy//tools/git:requirements.txt", extra_pip_args = ["--require-hashes"], ) pip_install( name = "kafka_pip3", - requirements = "@envoy//source/extensions/filters/network/kafka:requirements.txt", + requirements = "@envoy//contrib/kafka/filters/network/source:requirements.txt", extra_pip_args = ["--require-hashes"], # project_name = "Jinja", diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index a4b339cc7951a..b817e6efffab7 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -117,12 +117,12 @@ 
REPOSITORY_LOCATIONS_SPEC = dict( project_name = "c-ares", project_desc = "C library for asynchronous DNS requests", project_url = "https://c-ares.haxx.se/", - version = "1.17.1", - sha256 = "d73dd0f6de824afd407ce10750ea081af47eba52b8a6cb307d220131ad93fc40", + version = "1.17.2", + sha256 = "4803c844ce20ce510ef0eb83f8ea41fa24ecaae9d280c468c582d2bb25b3913d", strip_prefix = "c-ares-{version}", urls = ["https://github.com/c-ares/c-ares/releases/download/cares-{underscore_version}/c-ares-{version}.tar.gz"], use_category = ["dataplane_core", "controlplane"], - release_date = "2020-11-19", + release_date = "2021-08-10", cpe = "cpe:2.3:a:c-ares_project:c-ares:*", ), com_github_circonus_labs_libcircllhist = dict( @@ -211,6 +211,19 @@ REPOSITORY_LOCATIONS_SPEC = dict( release_date = "2020-11-13", use_category = ["test_only"], ), + com_github_google_libsxg = dict( + project_name = "libsxg", + project_desc = "Signed HTTP Exchange library", + project_url = "https://github.com/google/libsxg", + version = "beaa3939b76f8644f6833267e9f2462760838f18", + sha256 = "082bf844047a9aeec0d388283d5edc68bd22bcf4d32eb5a566654ae89956ad1f", + strip_prefix = "libsxg-{version}", + urls = ["https://github.com/google/libsxg/archive/{version}.tar.gz"], + use_category = ["other"], + extensions = ["envoy.filters.http.sxg"], + release_date = "2021-07-08", + cpe = "N/A", + ), com_github_google_tcmalloc = dict( project_name = "tcmalloc", project_desc = "Fast, multi-threaded malloc implementation", @@ -662,9 +675,9 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "Python rules for Bazel", project_desc = "Bazel rules for the Python language", project_url = "https://github.com/bazelbuild/rules_python", - version = "0.1.0", - sha256 = "b6d46438523a3ec0f3cead544190ee13223a52f6a6765a29eae7b7cc24cc83a0", - release_date = "2020-10-15", + version = "0.3.0", + sha256 = "934c9ceb552e84577b0faf1e5a2f0450314985b4d8712b2b70717dc679fdc01b", + release_date = "2021-06-23", urls = 
["https://github.com/bazelbuild/rules_python/releases/download/{version}/rules_python-{version}.tar.gz"], use_category = ["build"], ), @@ -804,16 +817,17 @@ REPOSITORY_LOCATIONS_SPEC = dict( release_date = "2021-06-25", cpe = "cpe:2.3:a:google:v8:*", ), - com_googlesource_quiche = dict( + com_github_google_quiche = dict( project_name = "QUICHE", project_desc = "QUICHE (QUIC, HTTP/2, Etc) is Google‘s implementation of QUIC and related protocols", - project_url = "https://quiche.googlesource.com/quiche", - version = "5dd7a030209f9a6b5043bebd8ac3ee54f18d1d08", - sha256 = "306342cb35cb9d8baea079c7b924b0133c53cbf182b251655e589d3b4604dc41", + project_url = "https://github.com/google/quiche", + version = "8d5eb27ee2e3f009f7180e8ace0ff97830d9c3e9", + sha256 = "88cc71556b96bbec953a716a12c26f88b8af4d5e9a83cf3ec38aba4caed6bf52", # Static snapshot of https://quiche.googlesource.com/quiche/+archive/{version}.tar.gz - urls = ["https://storage.googleapis.com/quiche-envoy-integration/{version}.tar.gz"], + urls = ["https://github.com/google/quiche/archive/{version}.tar.gz"], + strip_prefix = "quiche-{version}", use_category = ["dataplane_core"], - release_date = "2021-07-16", + release_date = "2021-08-31", cpe = "N/A", ), com_googlesource_googleurl = dict( @@ -925,6 +939,19 @@ REPOSITORY_LOCATIONS_SPEC = dict( release_date = "2020-03-03", cpe = "cpe:2.3:a:apache:kafka:*", ), + edenhill_librdkafka = dict( + project_name = "Kafka (C/C++ client)", + project_desc = "C/C++ client for Apache Kafka (open-source distributed event streaming platform)", + project_url = "https://github.com/edenhill/librdkafka", + version = "1.7.0", + sha256 = "c71b8c5ff419da80c31bb8d3036a408c87ad523e0c7588e7660ee5f3c8973057", + strip_prefix = "librdkafka-{version}", + urls = ["https://github.com/edenhill/librdkafka/archive/v{version}.tar.gz"], + use_category = ["dataplane_ext"], + extensions = ["envoy.filters.network.kafka_broker"], + release_date = "2021-05-10", + cpe = "N/A", + ), kafka_server_binary = 
dict( project_name = "Kafka (server binary)", project_desc = "Open-source distributed event streaming platform", @@ -975,8 +1002,8 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "WebAssembly for Proxies (C++ host implementation)", project_desc = "WebAssembly for Proxies (C++ host implementation)", project_url = "https://github.com/proxy-wasm/proxy-wasm-cpp-host", - version = "82633e3ea94cdb96604e7fb01e3c0a36eb35c54d", - sha256 = "850e00833f27113a3ef74811233a596ebf0e08477f84eb0dc993f7009ae1ca95", + version = "03185974ef574233a5f6383311eb74a380146fe2", + sha256 = "34948e3ba239cc721af8d0a0a5b678325f363cbd542bddecf2267d24780d5b4d", strip_prefix = "proxy-wasm-cpp-host-{version}", urls = ["https://github.com/proxy-wasm/proxy-wasm-cpp-host/archive/{version}.tar.gz"], use_category = ["dataplane_ext"], @@ -992,7 +1019,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( "envoy.wasm.runtime.wavm", "envoy.wasm.runtime.wasmtime", ], - release_date = "2021-07-13", + release_date = "2021-08-12", cpe = "N/A", ), proxy_wasm_rust_sdk = dict( diff --git a/bazel/rules_python.patch b/bazel/rules_python.patch index 5c373d8ff0297..205998745d576 100644 --- a/bazel/rules_python.patch +++ b/bazel/rules_python.patch @@ -1,13 +1,13 @@ diff --git a/python/pip_install/repositories.bzl b/python/pip_install/repositories.bzl -index df63674..80824e4 100644 +index 302ff0e..c40deae 100644 --- a/python/pip_install/repositories.bzl +++ b/python/pip_install/repositories.bzl -@@ -16,8 +16,8 @@ _RULE_DEPS = [ +@@ -26,8 +26,8 @@ _RULE_DEPS = [ ), ( "pypi__setuptools", -- "https://files.pythonhosted.org/packages/54/28/c45d8b54c1339f9644b87663945e54a8503cfef59cf0f65b3ff5dd17cf64/setuptools-42.0.2-py2.py3-none-any.whl", -- "c8abd0f3574bc23afd2f6fd2c415ba7d9e097c8a99b845473b0d957ba1e2dac6", +- "https://files.pythonhosted.org/packages/ab/b5/3679d7c98be5b65fa5522671ef437b792d909cf3908ba54fe9eca5d2a766/setuptools-44.1.0-py2.py3-none-any.whl", +- "992728077ca19db6598072414fb83e0a284aca1253aaf2e24bb1e55ee6db1a30", + 
"https://files.pythonhosted.org/packages/70/06/849cc805ac6332210083f2114a95b22ee252ce81ed4e1be4f1d2b87c9108/setuptools-54.0.0-py3-none-any.whl", + "d85b57c41e88b69ab87065c964134ec85b7573cbab0fdaa7ef32330ed764600a", ), diff --git a/ci/Dockerfile-envoy b/ci/Dockerfile-envoy index 1c7711d540668..ac3cd3e74c12c 100644 --- a/ci/Dockerfile-envoy +++ b/ci/Dockerfile-envoy @@ -13,11 +13,12 @@ RUN apt-get update && apt-get upgrade -y \ RUN mkdir -p /etc/envoy +ARG ENVOY_BINARY=envoy ARG ENVOY_BINARY_SUFFIX=_stripped -ADD ${TARGETPLATFORM}/build_release${ENVOY_BINARY_SUFFIX}/* /usr/local/bin/ +ADD ${TARGETPLATFORM}/build_${ENVOY_BINARY}_release${ENVOY_BINARY_SUFFIX}/* /usr/local/bin/ ADD configs/envoyproxy_io_proxy.yaml /etc/envoy/envoy.yaml -ADD ${TARGETPLATFORM}/build_release/su-exec /usr/local/bin/ +ADD ${TARGETPLATFORM}/build_${ENVOY_BINARY}_release/su-exec /usr/local/bin/ RUN chown root:root /usr/local/bin/su-exec && adduser --group --system envoy EXPOSE 10000 diff --git a/ci/Dockerfile-envoy-alpine b/ci/Dockerfile-envoy-alpine index 18b5aba78f4f2..36a23f6e3a2aa 100644 --- a/ci/Dockerfile-envoy-alpine +++ b/ci/Dockerfile-envoy-alpine @@ -6,7 +6,7 @@ RUN apk add --no-cache shadow su-exec \ && addgroup -S envoy && adduser --no-create-home -S envoy -G envoy ARG ENVOY_BINARY_SUFFIX=_stripped -ADD linux/amd64/build_release${ENVOY_BINARY_SUFFIX}/* /usr/local/bin/ +ADD linux/amd64/build_envoy_release${ENVOY_BINARY_SUFFIX}/* /usr/local/bin/ EXPOSE 10000 diff --git a/ci/Dockerfile-envoy-distroless b/ci/Dockerfile-envoy-distroless index 630ccac08304e..d2647f7b38557 100644 --- a/ci/Dockerfile-envoy-distroless +++ b/ci/Dockerfile-envoy-distroless @@ -3,7 +3,7 @@ FROM gcr.io/distroless/base-debian10:nonroot ADD configs/envoyproxy_io_proxy.yaml /etc/envoy/envoy.yaml ARG ENVOY_BINARY_SUFFIX=_stripped -ADD linux/amd64/build_release${ENVOY_BINARY_SUFFIX}/* /usr/local/bin/ +ADD linux/amd64/build_envoy_release${ENVOY_BINARY_SUFFIX}/* /usr/local/bin/ EXPOSE 10000 diff --git 
a/ci/build_setup.sh b/ci/build_setup.sh index cedbda30a9e86..977c6477d7dc2 100755 --- a/ci/build_setup.sh +++ b/ci/build_setup.sh @@ -10,6 +10,8 @@ export PPROF_PATH=/thirdparty_build/bin/pprof [ -z "${ENVOY_SRCDIR}" ] && export ENVOY_SRCDIR=/source [ -z "${ENVOY_BUILD_TARGET}" ] && export ENVOY_BUILD_TARGET=//source/exe:envoy-static [ -z "${ENVOY_BUILD_DEBUG_INFORMATION}" ] && export ENVOY_BUILD_DEBUG_INFORMATION=//source/exe:envoy-static.dwp +[ -z "${ENVOY_CONTRIB_BUILD_TARGET}" ] && export ENVOY_CONTRIB_BUILD_TARGET=//contrib/exe:envoy-static +[ -z "${ENVOY_CONTRIB_BUILD_DEBUG_INFORMATION}" ] && export ENVOY_CONTRIB_BUILD_DEBUG_INFORMATION=//contrib/exe:envoy-static.dwp [ -z "${ENVOY_BUILD_ARCH}" ] && { ENVOY_BUILD_ARCH=$(uname -m) export ENVOY_BUILD_ARCH diff --git a/ci/do_ci.sh b/ci/do_ci.sh index 2bdbfb5699cd0..d507d36993da0 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -62,47 +62,38 @@ function bazel_with_collection() { run_process_test_result } -function cp_binary_for_outside_access() { - DELIVERY_LOCATION="$1" - cp -f \ - bazel-bin/"${ENVOY_BIN}" \ - "${ENVOY_DELIVERY_DIR}"/"${DELIVERY_LOCATION}" -} - -function cp_debug_info_for_outside_access() { - DELIVERY_LOCATION="$1" - cp -f \ - bazel-bin/"${ENVOY_BIN}".dwp \ - "${ENVOY_DELIVERY_DIR}"/"${DELIVERY_LOCATION}".dwp -} - - function cp_binary_for_image_build() { + local BINARY_TYPE="$1" + local COMPILE_TYPE="$2" + local EXE_NAME="$3" + # TODO(mattklein123): Replace this with caching and a different job which creates images. local BASE_TARGET_DIR="${ENVOY_SRCDIR}${BUILD_ARCH_DIR}" + local TARGET_DIR=build_"${EXE_NAME}"_"${BINARY_TYPE}" + local FINAL_DELIVERY_DIR="${ENVOY_DELIVERY_DIR}"/"${EXE_NAME}" + echo "Copying binary for image build..." 
- COMPILE_TYPE="$2" - mkdir -p "${BASE_TARGET_DIR}"/build_"$1" - cp -f "${ENVOY_DELIVERY_DIR}"/envoy "${BASE_TARGET_DIR}"/build_"$1" + mkdir -p "${BASE_TARGET_DIR}"/"${TARGET_DIR}" + cp -f "${FINAL_DELIVERY_DIR}"/envoy "${BASE_TARGET_DIR}"/"${TARGET_DIR}" # Copy the su-exec utility binary into the image - cp -f bazel-bin/external/com_github_ncopa_suexec/su-exec "${BASE_TARGET_DIR}"/build_"$1" + cp -f bazel-bin/external/com_github_ncopa_suexec/su-exec "${BASE_TARGET_DIR}"/"${TARGET_DIR}" if [[ "${COMPILE_TYPE}" == "dbg" || "${COMPILE_TYPE}" == "opt" ]]; then - cp -f "${ENVOY_DELIVERY_DIR}"/envoy.dwp "${BASE_TARGET_DIR}"/build_"$1" + cp -f "${FINAL_DELIVERY_DIR}"/envoy.dwp "${BASE_TARGET_DIR}"/"${TARGET_DIR}" fi - mkdir -p "${BASE_TARGET_DIR}"/build_"$1"_stripped - strip "${ENVOY_DELIVERY_DIR}"/envoy -o "${BASE_TARGET_DIR}"/build_"$1"_stripped/envoy + mkdir -p "${BASE_TARGET_DIR}"/"${TARGET_DIR}"_stripped + strip "${FINAL_DELIVERY_DIR}"/envoy -o "${BASE_TARGET_DIR}"/"${TARGET_DIR}"_stripped/envoy # Copy for azp which doesn't preserve permissions, creating a tar archive - tar czf "${ENVOY_BUILD_DIR}"/envoy_binary.tar.gz -C "${BASE_TARGET_DIR}" build_"$1" build_"$1"_stripped + tar czf "${ENVOY_BUILD_DIR}"/"${EXE_NAME}"_binary.tar.gz -C "${BASE_TARGET_DIR}" "${TARGET_DIR}" "${TARGET_DIR}"_stripped # Remove binaries to save space, only if BUILD_REASON exists (running in AZP) [[ -z "${BUILD_REASON}" ]] || \ - rm -rf "${BASE_TARGET_DIR}"/build_"$1" "${BASE_TARGET_DIR}"/build_"$1"_stripped "${ENVOY_DELIVERY_DIR}"/envoy{,.dwp} \ + rm -rf "${BASE_TARGET_DIR:?}"/"${TARGET_DIR}" "${BASE_TARGET_DIR:?}"/"${TARGET_DIR}"_stripped "${FINAL_DELIVERY_DIR:?}"/envoy{,.dwp} \ bazel-bin/"${ENVOY_BIN}"{,.dwp} } function bazel_binary_build() { - BINARY_TYPE="$1" + local BINARY_TYPE="$1" if [[ "${BINARY_TYPE}" == "release" ]]; then COMPILE_TYPE="opt" elif [[ "${BINARY_TYPE}" == "debug" ]]; then @@ -116,35 +107,48 @@ function bazel_binary_build() { COMPILE_TYPE="fastbuild" fi - echo 
"Building..." - ENVOY_BIN=$(echo "${ENVOY_BUILD_TARGET}" | sed -e 's#^@\([^/]*\)/#external/\1#;s#^//##;s#:#/#') + local BUILD_TARGET="$2" + local BUILD_DEBUG_INFORMATION="$3" + local EXE_NAME="$4" + local FINAL_DELIVERY_DIR="${ENVOY_DELIVERY_DIR}"/"${EXE_NAME}" + mkdir -p "${FINAL_DELIVERY_DIR}" + + echo "Building (type=${BINARY_TYPE} target=${BUILD_TARGET} debug=${BUILD_DEBUG_INFORMATION} name=${EXE_NAME})..." + ENVOY_BIN=$(echo "${BUILD_TARGET}" | sed -e 's#^@\([^/]*\)/#external/\1#;s#^//##;s#:#/#') # This is a workaround for https://github.com/bazelbuild/bazel/issues/11834 [[ -n "${ENVOY_RBE}" ]] && rm -rf bazel-bin/"${ENVOY_BIN}"* - bazel build "${BAZEL_BUILD_OPTIONS[@]}" -c "${COMPILE_TYPE}" "${ENVOY_BUILD_TARGET}" ${CONFIG_ARGS} + bazel build "${BAZEL_BUILD_OPTIONS[@]}" -c "${COMPILE_TYPE}" "${BUILD_TARGET}" ${CONFIG_ARGS} collect_build_profile "${BINARY_TYPE}"_build # Copy the built envoy binary somewhere that we can access outside of the # container. - cp_binary_for_outside_access envoy + cp -f bazel-bin/"${ENVOY_BIN}" "${FINAL_DELIVERY_DIR}"/envoy if [[ "${COMPILE_TYPE}" == "dbg" || "${COMPILE_TYPE}" == "opt" ]]; then # Generate dwp file for debugging since we used split DWARF to reduce binary # size - bazel build "${BAZEL_BUILD_OPTIONS[@]}" -c "${COMPILE_TYPE}" "${ENVOY_BUILD_DEBUG_INFORMATION}" ${CONFIG_ARGS} + bazel build "${BAZEL_BUILD_OPTIONS[@]}" -c "${COMPILE_TYPE}" "${BUILD_DEBUG_INFORMATION}" ${CONFIG_ARGS} # Copy the debug information - cp_debug_info_for_outside_access envoy + cp -f bazel-bin/"${ENVOY_BIN}".dwp "${FINAL_DELIVERY_DIR}"/envoy.dwp fi # Build su-exec utility bazel build external:su-exec - cp_binary_for_image_build "${BINARY_TYPE}" "${COMPILE_TYPE}" + cp_binary_for_image_build "${BINARY_TYPE}" "${COMPILE_TYPE}" "${EXE_NAME}" +} +function bazel_envoy_binary_build() { + bazel_binary_build "$1" "${ENVOY_BUILD_TARGET}" "${ENVOY_BUILD_DEBUG_INFORMATION}" envoy +} + +function bazel_contrib_binary_build() { + bazel_binary_build "$1" 
"${ENVOY_CONTRIB_BUILD_TARGET}" "${ENVOY_CONTRIB_BUILD_DEBUG_INFORMATION}" envoy-contrib } function run_process_test_result() { - if [[ $(find "$TEST_TMPDIR" -name "*_attempt.xml" 2> /dev/null) ]]; then + if [[ -z "$CI_SKIP_PROCESS_TEST_RESULTS" ]] && [[ $(find "$TEST_TMPDIR" -name "*_attempt.xml" 2> /dev/null) ]]; then echo "running flaky test reporting script" "${ENVOY_SRCDIR}"/ci/flaky_test/run_process_xml.sh "$CI_TARGET" else @@ -184,7 +188,11 @@ if [[ $# -ge 1 ]]; then else # Coverage test will add QUICHE tests by itself. COVERAGE_TEST_TARGETS=("//test/...") - TEST_TARGETS=("${COVERAGE_TEST_TARGETS[@]}" "@com_googlesource_quiche//:ci_tests") + if [[ "$CI_TARGET" == "bazel.release" ]]; then + # We test contrib on release only. + COVERAGE_TEST_TARGETS=("${COVERAGE_TEST_TARGETS[@]}" "//contrib/...") + fi + TEST_TARGETS=("${COVERAGE_TEST_TARGETS[@]}" "@com_github_google_quiche//:ci_tests") fi if [[ "$CI_TARGET" == "bazel.release" ]]; then @@ -200,18 +208,22 @@ if [[ "$CI_TARGET" == "bazel.release" ]]; then echo "Testing ${TEST_TARGETS[*]} with options: ${BAZEL_BUILD_OPTIONS[*]}" bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" -c opt "${TEST_TARGETS[@]}" - echo "bazel release build with tests..." - bazel_binary_build release + echo "bazel release build..." + bazel_envoy_binary_build release + + echo "bazel contrib release build..." + bazel_contrib_binary_build release + exit 0 elif [[ "$CI_TARGET" == "bazel.release.server_only" ]]; then setup_clang_toolchain echo "bazel release build..." - bazel_binary_build release + bazel_envoy_binary_build release exit 0 elif [[ "$CI_TARGET" == "bazel.sizeopt.server_only" ]]; then setup_clang_toolchain echo "bazel size optimized build..." 
- bazel_binary_build sizeopt + bazel_envoy_binary_build sizeopt exit 0 elif [[ "$CI_TARGET" == "bazel.sizeopt" ]]; then setup_clang_toolchain @@ -219,7 +231,7 @@ elif [[ "$CI_TARGET" == "bazel.sizeopt" ]]; then bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" --config=sizeopt "${TEST_TARGETS[@]}" echo "bazel size optimized build with tests..." - bazel_binary_build sizeopt + bazel_envoy_binary_build sizeopt exit 0 elif [[ "$CI_TARGET" == "bazel.gcc" ]]; then BAZEL_BUILD_OPTIONS+=("--test_env=HEAPCHECK=") @@ -229,7 +241,7 @@ elif [[ "$CI_TARGET" == "bazel.gcc" ]]; then bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" -c fastbuild -- "${TEST_TARGETS[@]}" echo "bazel release build with gcc..." - bazel_binary_build fastbuild + bazel_envoy_binary_build fastbuild exit 0 elif [[ "$CI_TARGET" == "bazel.debug" ]]; then setup_clang_toolchain @@ -237,12 +249,12 @@ elif [[ "$CI_TARGET" == "bazel.debug" ]]; then bazel test "${BAZEL_BUILD_OPTIONS[@]}" -c dbg "${TEST_TARGETS[@]}" echo "bazel debug build with tests..." - bazel_binary_build debug + bazel_envoy_binary_build debug exit 0 elif [[ "$CI_TARGET" == "bazel.debug.server_only" ]]; then setup_clang_toolchain echo "bazel debug build..." - bazel_binary_build debug + bazel_envoy_binary_build debug exit 0 elif [[ "$CI_TARGET" == "bazel.asan" ]]; then setup_clang_toolchain @@ -295,7 +307,7 @@ elif [[ "$CI_TARGET" == "bazel.dev" ]]; then # This doesn't go into CI but is available for developer convenience. echo "bazel fastbuild build with tests..." echo "Building..." - bazel_binary_build fastbuild + bazel_envoy_binary_build fastbuild echo "Testing ${TEST_TARGETS[*]}" bazel test "${BAZEL_BUILD_OPTIONS[@]}" -c fastbuild "${TEST_TARGETS[@]}" @@ -354,6 +366,8 @@ elif [[ "$CI_TARGET" == "bazel.api" ]]; then export LLVM_CONFIG="${LLVM_ROOT}"/bin/llvm-config echo "Validating API structure..." "${ENVOY_SRCDIR}"/tools/api/validate_structure.py + echo "Validate Golang protobuf generation..." 
+ "${ENVOY_SRCDIR}"/tools/api/generate_go_protobuf.py echo "Testing API and API Boosting..." bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" -c fastbuild @envoy_api_canonical//test/... @envoy_api_canonical//tools/... \ @envoy_api_canonical//tools:tap2pcap_test @envoy_dev//clang_tools/api_booster/... @@ -363,6 +377,16 @@ elif [[ "$CI_TARGET" == "bazel.api" ]]; then # We use custom BAZEL_BUILD_OPTIONS here; the API booster isn't capable of working with libc++ yet. BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS[*]}" python3.8 "${ENVOY_SRCDIR}"/tools/api_boost/api_boost_test.py exit 0 +elif [[ "$CI_TARGET" == "bazel.api_compat" ]]; then + echo "Building buf..." + bazel build @com_github_bufbuild_buf//:buf + BUF_PATH=$(realpath "bazel-source/external/com_github_bufbuild_buf/bin/buf") + echo "Checking API for breaking changes to protobuf backwards compatibility..." + BASE_BRANCH_REF=$("${ENVOY_SRCDIR}"/tools/git/last_github_commit.sh) + COMMIT_TITLE=$(git log -n 1 --pretty='format:%C(auto)%h (%s, %ad)' "${BASE_BRANCH_REF}") + echo -e "\tUsing base commit ${COMMIT_TITLE}" + "${ENVOY_SRCDIR}"/tools/api_proto_breaking_change_detector/detector_ci.sh "${BUF_PATH}" "${BASE_BRANCH_REF}" + exit 0 elif [[ "$CI_TARGET" == "bazel.coverage" || "$CI_TARGET" == "bazel.fuzz_coverage" ]]; then setup_clang_toolchain echo "${CI_TARGET} build with tests ${COVERAGE_TEST_TARGETS[*]}" diff --git a/ci/docker_ci.sh b/ci/docker_ci.sh index 5ccf2dbd0c614..7fb99271c63cc 100755 --- a/ci/docker_ci.sh +++ b/ci/docker_ci.sh @@ -23,6 +23,7 @@ config_env() { build_platforms() { TYPE=$1 FILE_SUFFIX="${TYPE/-debug/}" + FILE_SUFFIX="${FILE_SUFFIX/-contrib/}" if is_windows; then echo "windows/amd64" @@ -36,8 +37,13 @@ build_platforms() { build_args() { TYPE=$1 FILE_SUFFIX="${TYPE/-debug/}" + FILE_SUFFIX="${FILE_SUFFIX/-contrib/}" printf ' -f ci/Dockerfile-envoy%s' "${FILE_SUFFIX}" + if [[ "${TYPE}" == *-contrib* ]]; then + printf ' --build-arg ENVOY_BINARY=envoy-contrib' + fi + if [[ "${TYPE}" == *-debug 
]]; then printf ' --build-arg ENVOY_BINARY_SUFFIX=' elif [[ "${TYPE}" == "-google-vrp" ]]; then @@ -124,7 +130,7 @@ if is_windows; then BUILD_COMMAND=("build") else # "-google-vrp" must come afer "" to ensure we rebuild the local base image dependency. - BUILD_TYPES=("" "-debug" "-alpine" "-distroless" "-google-vrp") + BUILD_TYPES=("" "-debug" "-contrib" "-contrib-debug" "-alpine" "-distroless" "-google-vrp") # Configure docker-buildx tools BUILD_COMMAND=("buildx" "build") @@ -141,7 +147,7 @@ for BUILD_TYPE in "${BUILD_TYPES[@]}"; do build_images "${BUILD_TYPE}" "$image_tag" if ! is_windows; then - if [[ "$BUILD_TYPE" == "" || "$BUILD_TYPE" == "-alpine" ]]; then + if [[ "$BUILD_TYPE" == "" || "$BUILD_TYPE" == "-contrib" || "$BUILD_TYPE" == "-alpine" ]]; then # verify_examples expects the base and alpine images, and for them to be named `-dev` dev_image="envoyproxy/envoy${BUILD_TYPE}-dev:latest" docker tag "$image_tag" "$dev_image" diff --git a/ci/flaky_test/requirements.txt b/ci/flaky_test/requirements.txt index 9fe42375ba22f..1e9f11f4cfa12 100644 --- a/ci/flaky_test/requirements.txt +++ b/ci/flaky_test/requirements.txt @@ -91,13 +91,13 @@ multidict==5.1.0 \ slackclient==2.9.3 \ --hash=sha256:2d68d668c02f4038299897e5c4723ab85dd40a3548354924b24f333a435856f8 \ --hash=sha256:07ec8fa76f6aa64852210ae235ff9e637ba78124e06c0b07a7eeea4abb955965 -typing-extensions==3.10.0.0 \ - --hash=sha256:0ac0f89795dd19de6b97debb0c6af1c70987fd80a2d62d1958f7e56fcc31b497 \ - --hash=sha256:779383f6086d90c99ae41cf0ff39aac8a7937a9283ce0a414e5dd782f4c94a84 \ - --hash=sha256:50b6f157849174217d0656f99dc82fe932884fb250826c18350e159ec6cdf342 -wheel==0.36.2 \ - --hash=sha256:78b5b185f0e5763c26ca1e324373aadd49182ca90e825f7853f4b2509215dc0e \ - --hash=sha256:e11eefd162658ea59a60a0f6c7d493a7190ea4b9a85e335b33489d9f17e0245e +typing-extensions==3.10.0.2 \ + --hash=sha256:d8226d10bc02a29bcc81df19a26e56a9647f8b0a6d4a83924139f4a8b01f17b7 \ + 
--hash=sha256:f1d25edafde516b146ecd0613dabcc61409817af4766fbbcfb8d1ad4ec441a34 \ + --hash=sha256:49f75d16ff11f1cd258e1b988ccff82a3ca5570217d7ad8c5f48205dd99a677e +wheel==0.37.0 \ + --hash=sha256:21014b2bd93c6d0034b6ba5d35e4eb284340e09d63c59aef6fc14b0f346146fd \ + --hash=sha256:e2ef7239991699e3355d54f8e968a21bb940a1dbf34a4d226741e64462516fad yarl==1.6.3 \ --hash=sha256:0355a701b3998dcd832d0dc47cc5dedf3874f966ac7f870e0f3a6788d802d434 \ --hash=sha256:bafb450deef6861815ed579c7a6113a879a6ef58aed4c3a4be54400ae8871478 \ diff --git a/ci/run_envoy_docker.sh b/ci/run_envoy_docker.sh index 30905c21ac98e..9511ebccb00d1 100755 --- a/ci/run_envoy_docker.sh +++ b/ci/run_envoy_docker.sh @@ -85,6 +85,7 @@ docker run --rm \ -e ENVOY_BUILD_IMAGE \ -e ENVOY_SRCDIR \ -e ENVOY_BUILD_TARGET \ + -e ENVOY_BUILD_DEBUG_INFORMATION \ -e SYSTEM_PULLREQUEST_PULLREQUESTNUMBER \ -e GCS_ARTIFACT_BUCKET \ -e GITHUB_TOKEN \ diff --git a/configs/configgen.sh b/configs/configgen.sh index 6f4ab35c26f7a..c383980715832 100755 --- a/configs/configgen.sh +++ b/configs/configgen.sh @@ -10,7 +10,10 @@ shift mkdir -p "$OUT_DIR/certs" mkdir -p "$OUT_DIR/lib" mkdir -p "$OUT_DIR/protos" -"$CONFIGGEN" "$OUT_DIR" + +if [[ "$CONFIGGEN" != "NO_CONFIGGEN" ]]; then + "$CONFIGGEN" "$OUT_DIR" +fi for FILE in "$@"; do case "$FILE" in @@ -33,4 +36,9 @@ for FILE in "$@"; do done # tar is having issues with -C for some reason so just cd into OUT_DIR. -(cd "$OUT_DIR"; tar -hcvf example_configs.tar -- *.yaml certs/*.pem certs/*.der protos/*.pb lib/*.wasm lib/*.lua) +# Ignore files that don't exist so this script works for both core and contrib. +# shellcheck disable=SC2046 +# shellcheck disable=SC2035 +# TODO(mattklein123): I can't make this work when using the shellcheck suggestions. Try +# to fix this. 
+(cd "$OUT_DIR"; tar -hcvf example_configs.tar -- $(ls *.yaml certs/*.pem certs/*.der protos/*.pb lib/*.wasm lib/*.lua 2>/dev/null)) diff --git a/configs/envoy_double_proxy.template.yaml b/configs/envoy_double_proxy.template.yaml index e620b9024f09c..b574d6a518c5c 100644 --- a/configs/envoy_double_proxy.template.yaml +++ b/configs/envoy_double_proxy.template.yaml @@ -136,11 +136,6 @@ static_resources: address: front-proxy.yourcompany.net port_value: 9400 protocol: TCP - # There are so few connections going back - # that we can get some imbalance. Until we come up - # with a better solution just limit the requests - # so we can cycle and get better spread. - max_requests_per_connection: 25000 transport_socket: name: envoy.transport_sockets.tls typed_config: @@ -161,6 +156,12 @@ static_resources: "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions explicit_http_config: http2_protocol_options: {} + common_http_protocol_options: + # There are so few connections going back + # that we can get some imbalance. Until we come up + # with a better solution just limit the requests + # so we can cycle and get better spread. 
+ max_requests_per_connection: 25000 - name: lightstep_saas type: LOGICAL_DNS lb_policy: ROUND_ROBIN diff --git a/contrib/BUILD b/contrib/BUILD new file mode 100644 index 0000000000000..aa0691c6142a8 --- /dev/null +++ b/contrib/BUILD @@ -0,0 +1,6 @@ +licenses(["notice"]) # Apache 2 + +exports_files([ + "extensions_metadata.yaml", + "contrib_build_config.bzl", +]) diff --git a/contrib/all_contrib_extensions.bzl b/contrib/all_contrib_extensions.bzl new file mode 100644 index 0000000000000..5a450825fd033 --- /dev/null +++ b/contrib/all_contrib_extensions.bzl @@ -0,0 +1,4 @@ +load(":contrib_build_config.bzl", "CONTRIB_EXTENSIONS") + +def envoy_all_contrib_extensions(): + return [v + "_envoy_extension" for v in CONTRIB_EXTENSIONS.values()] diff --git a/source/extensions/common/sqlutils/BUILD b/contrib/common/sqlutils/source/BUILD similarity index 84% rename from source/extensions/common/sqlutils/BUILD rename to contrib/common/sqlutils/source/BUILD index f477e6a422080..c100c39ae95c0 100644 --- a/source/extensions/common/sqlutils/BUILD +++ b/contrib/common/sqlutils/source/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_extension_package", + "envoy_contrib_package", ) licenses(["notice"]) # Apache 2 -envoy_extension_package() +envoy_contrib_package() envoy_cc_library( name = "sqlutils_lib", diff --git a/source/extensions/common/sqlutils/sqlutils.cc b/contrib/common/sqlutils/source/sqlutils.cc similarity index 95% rename from source/extensions/common/sqlutils/sqlutils.cc rename to contrib/common/sqlutils/source/sqlutils.cc index 1c9a30d6e502d..dffa393bd0390 100644 --- a/source/extensions/common/sqlutils/sqlutils.cc +++ b/contrib/common/sqlutils/source/sqlutils.cc @@ -1,4 +1,4 @@ -#include "source/extensions/common/sqlutils/sqlutils.h" +#include "contrib/common/sqlutils/source/sqlutils.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/common/sqlutils/sqlutils.h b/contrib/common/sqlutils/source/sqlutils.h 
similarity index 100% rename from source/extensions/common/sqlutils/sqlutils.h rename to contrib/common/sqlutils/source/sqlutils.h diff --git a/test/extensions/common/sqlutils/BUILD b/contrib/common/sqlutils/test/BUILD similarity index 69% rename from test/extensions/common/sqlutils/BUILD rename to contrib/common/sqlutils/test/BUILD index 0277e47706b2d..e89cb0138b712 100644 --- a/test/extensions/common/sqlutils/BUILD +++ b/contrib/common/sqlutils/test/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", - "envoy_package", + "envoy_contrib_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_contrib_package() envoy_cc_test( name = "sqlutils_tests", @@ -15,6 +15,6 @@ envoy_cc_test( ], external_deps = ["sqlparser"], deps = [ - "//source/extensions/common/sqlutils:sqlutils_lib", + "//contrib/common/sqlutils/source:sqlutils_lib", ], ) diff --git a/test/extensions/common/sqlutils/sqlutils_test.cc b/contrib/common/sqlutils/test/sqlutils_test.cc similarity index 99% rename from test/extensions/common/sqlutils/sqlutils_test.cc rename to contrib/common/sqlutils/test/sqlutils_test.cc index f7464b668901d..e58f03eab5f7d 100644 --- a/test/extensions/common/sqlutils/sqlutils_test.cc +++ b/contrib/common/sqlutils/test/sqlutils_test.cc @@ -1,5 +1,4 @@ -#include "source/extensions/common/sqlutils/sqlutils.h" - +#include "contrib/common/sqlutils/source/sqlutils.h" #include "gtest/gtest.h" namespace Envoy { diff --git a/contrib/contrib_build_config.bzl b/contrib/contrib_build_config.bzl new file mode 100644 index 0000000000000..34ef00af9fd15 --- /dev/null +++ b/contrib/contrib_build_config.bzl @@ -0,0 +1,18 @@ +# See bazel/README.md for details on how this system works. 
+CONTRIB_EXTENSIONS = { + # + # HTTP filters + # + + "envoy.filters.http.squash": "//contrib/squash/filters/http/source:config", + "envoy.filters.http.sxg": "//contrib/sxg/filters/http/source:config", + + # + # Network filters + # + + "envoy.filters.network.kafka_broker": "//contrib/kafka/filters/network/source:kafka_broker_config_lib", + "envoy.filters.network.mysql_proxy": "//contrib/mysql_proxy/filters/network/source:config", + "envoy.filters.network.postgres_proxy": "//contrib/postgres_proxy/filters/network/source:config", + "envoy.filters.network.rocketmq_proxy": "//contrib/rocketmq_proxy/filters/network/source:config", +} diff --git a/contrib/exe/BUILD b/contrib/exe/BUILD new file mode 100644 index 0000000000000..b70a786989d7a --- /dev/null +++ b/contrib/exe/BUILD @@ -0,0 +1,41 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_binary", + "envoy_cc_test", + "envoy_contrib_package", +) +load( + "//contrib:all_contrib_extensions.bzl", + "envoy_all_contrib_extensions", +) + +licenses(["notice"]) # Apache 2 + +envoy_contrib_package() + +alias( + name = "envoy", + actual = ":envoy-static", +) + +envoy_cc_binary( + name = "envoy-static", + stamped = True, + deps = ["//source/exe:envoy_main_entry_lib"] + envoy_all_contrib_extensions(), +) + +envoy_cc_test( + name = "example_configs_test", + size = "large", + data = [ + "//examples:contrib_configs", + "//test/config_test:example_configs_test_setup.sh", + ], + env = { + "EXAMPLE_CONFIGS_TAR_PATH": "envoy/examples/example_configs.tar", + "DISABLE_TEST_MERGE": "true", + }, + deps = [ + "//test/config_test:example_configs_test_lib", + ] + envoy_all_contrib_extensions(), +) diff --git a/contrib/extensions_metadata.yaml b/contrib/extensions_metadata.yaml new file mode 100644 index 0000000000000..c3ccc61e53ee1 --- /dev/null +++ b/contrib/extensions_metadata.yaml @@ -0,0 +1,31 @@ +envoy.filters.http.squash: + categories: + - envoy.filters.http + security_posture: requires_trusted_downstream_and_upstream + status: 
stable +envoy.filters.http.sxg: + categories: + - envoy.filters.http + security_posture: robust_to_untrusted_downstream + status: alpha +envoy.filters.network.kafka_broker: + categories: + - envoy.filters.network + security_posture: requires_trusted_downstream_and_upstream + status: wip +envoy.filters.network.rocketmq_proxy: + categories: + - envoy.filters.network + security_posture: requires_trusted_downstream_and_upstream + status: alpha +envoy.filters.network.mysql_proxy: + categories: + - envoy.filters.network + security_posture: requires_trusted_downstream_and_upstream + status: alpha +envoy.filters.network.postgres_proxy: + categories: + - envoy.filters.network + security_posture: requires_trusted_downstream_and_upstream + status: stable + diff --git a/source/extensions/filters/network/kafka/BUILD b/contrib/kafka/filters/network/source/BUILD similarity index 96% rename from source/extensions/filters/network/kafka/BUILD rename to contrib/kafka/filters/network/source/BUILD index a6f2677a6075d..ec196b5e9abe2 100644 --- a/source/extensions/filters/network/kafka/BUILD +++ b/contrib/kafka/filters/network/source/BUILD @@ -1,20 +1,20 @@ load( "//bazel:envoy_build_system.bzl", - "envoy_cc_extension", + "envoy_cc_contrib_extension", "envoy_cc_library", - "envoy_extension_package", + "envoy_contrib_package", ) load("@rules_python//python:defs.bzl", "py_binary", "py_library") load("@kafka_pip3//:requirements.bzl", "requirement") licenses(["notice"]) # Apache 2 +envoy_contrib_package() + # Kafka network filter. 
# Broker filter public docs: docs/root/configuration/network_filters/kafka_broker_filter.rst -envoy_extension_package() - -envoy_cc_extension( +envoy_cc_contrib_extension( name = "kafka_broker_config_lib", srcs = ["broker/config.cc"], hdrs = ["broker/config.h"], @@ -22,7 +22,7 @@ envoy_cc_extension( ":kafka_broker_filter_lib", "//source/extensions/filters/network:well_known_names", "//source/extensions/filters/network/common:factory_base_lib", - "@envoy_api//envoy/extensions/filters/network/kafka_broker/v3:pkg_cc_proto", + "@envoy_api//contrib/envoy/extensions/filters/network/kafka_broker/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/network/kafka/broker/config.cc b/contrib/kafka/filters/network/source/broker/config.cc similarity index 88% rename from source/extensions/filters/network/kafka/broker/config.cc rename to contrib/kafka/filters/network/source/broker/config.cc index 5e8620728f66c..459ce5fd20d81 100644 --- a/source/extensions/filters/network/kafka/broker/config.cc +++ b/contrib/kafka/filters/network/source/broker/config.cc @@ -1,10 +1,10 @@ -#include "source/extensions/filters/network/kafka/broker/config.h" +#include "contrib/kafka/filters/network/source/broker/config.h" #include "envoy/registry/registry.h" #include "envoy/server/filter_config.h" #include "envoy/stats/scope.h" -#include "source/extensions/filters/network/kafka/broker/filter.h" +#include "contrib/kafka/filters/network/source/broker/filter.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/kafka/broker/config.h b/contrib/kafka/filters/network/source/broker/config.h similarity index 84% rename from source/extensions/filters/network/kafka/broker/config.h rename to contrib/kafka/filters/network/source/broker/config.h index 62ed619aedece..c41a1cdf55d53 100644 --- a/source/extensions/filters/network/kafka/broker/config.h +++ b/contrib/kafka/filters/network/source/broker/config.h @@ -1,11 +1,11 @@ #pragma once -#include 
"envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.pb.h" -#include "envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.pb.validate.h" - #include "source/extensions/filters/network/common/factory_base.h" #include "source/extensions/filters/network/well_known_names.h" +#include "contrib/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.pb.h" +#include "contrib/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.pb.validate.h" + namespace Envoy { namespace Extensions { namespace NetworkFilters { diff --git a/source/extensions/filters/network/kafka/broker/filter.cc b/contrib/kafka/filters/network/source/broker/filter.cc similarity index 98% rename from source/extensions/filters/network/kafka/broker/filter.cc rename to contrib/kafka/filters/network/source/broker/filter.cc index 88427ea6f35d8..855226780ebd7 100644 --- a/source/extensions/filters/network/kafka/broker/filter.cc +++ b/contrib/kafka/filters/network/source/broker/filter.cc @@ -1,4 +1,4 @@ -#include "source/extensions/filters/network/kafka/broker/filter.h" +#include "contrib/kafka/filters/network/source/broker/filter.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/kafka/broker/filter.h b/contrib/kafka/filters/network/source/broker/filter.h similarity index 94% rename from source/extensions/filters/network/kafka/broker/filter.h rename to contrib/kafka/filters/network/source/broker/filter.h index 57c63de0dcf04..207115838000a 100644 --- a/source/extensions/filters/network/kafka/broker/filter.h +++ b/contrib/kafka/filters/network/source/broker/filter.h @@ -4,13 +4,13 @@ #include "envoy/stats/scope.h" #include "source/common/common/logger.h" -#include "source/extensions/filters/network/kafka/external/request_metrics.h" -#include "source/extensions/filters/network/kafka/external/response_metrics.h" -#include "source/extensions/filters/network/kafka/parser.h" -#include "source/extensions/filters/network/kafka/request_codec.h" 
-#include "source/extensions/filters/network/kafka/response_codec.h" #include "absl/container/flat_hash_map.h" +#include "contrib/kafka/filters/network/source/external/request_metrics.h" +#include "contrib/kafka/filters/network/source/external/response_metrics.h" +#include "contrib/kafka/filters/network/source/parser.h" +#include "contrib/kafka/filters/network/source/request_codec.h" +#include "contrib/kafka/filters/network/source/response_codec.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/kafka/codec.h b/contrib/kafka/filters/network/source/codec.h similarity index 100% rename from source/extensions/filters/network/kafka/codec.h rename to contrib/kafka/filters/network/source/codec.h diff --git a/source/extensions/filters/network/kafka/kafka_request.h b/contrib/kafka/filters/network/source/kafka_request.h similarity index 95% rename from source/extensions/filters/network/kafka/kafka_request.h rename to contrib/kafka/filters/network/source/kafka_request.h index 3c9cc414083e0..407e0e5c09f23 100644 --- a/source/extensions/filters/network/kafka/kafka_request.h +++ b/contrib/kafka/filters/network/source/kafka_request.h @@ -2,9 +2,9 @@ #include "envoy/common/exception.h" -#include "source/extensions/filters/network/kafka/external/serialization_composite.h" -#include "source/extensions/filters/network/kafka/serialization.h" -#include "source/extensions/filters/network/kafka/tagged_fields.h" +#include "contrib/kafka/filters/network/source/external/serialization_composite.h" +#include "contrib/kafka/filters/network/source/serialization.h" +#include "contrib/kafka/filters/network/source/tagged_fields.h" namespace Envoy { namespace Extensions { @@ -163,7 +163,6 @@ template class Request : public AbstractRequest { return request_header_ == rhs.request_header_ && data_ == rhs.data_; }; -private: const Data data_; }; diff --git a/source/extensions/filters/network/kafka/kafka_request_parser.cc 
b/contrib/kafka/filters/network/source/kafka_request_parser.cc similarity index 97% rename from source/extensions/filters/network/kafka/kafka_request_parser.cc rename to contrib/kafka/filters/network/source/kafka_request_parser.cc index c713dad186a39..cd8d25fe5d335 100644 --- a/source/extensions/filters/network/kafka/kafka_request_parser.cc +++ b/contrib/kafka/filters/network/source/kafka_request_parser.cc @@ -1,4 +1,4 @@ -#include "source/extensions/filters/network/kafka/kafka_request_parser.h" +#include "contrib/kafka/filters/network/source/kafka_request_parser.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/kafka/kafka_request_parser.h b/contrib/kafka/filters/network/source/kafka_request_parser.h similarity index 97% rename from source/extensions/filters/network/kafka/kafka_request_parser.h rename to contrib/kafka/filters/network/source/kafka_request_parser.h index 8afdc769de085..e9dfa6fe072f8 100644 --- a/source/extensions/filters/network/kafka/kafka_request_parser.h +++ b/contrib/kafka/filters/network/source/kafka_request_parser.h @@ -5,9 +5,10 @@ #include "envoy/common/exception.h" #include "source/common/common/assert.h" -#include "source/extensions/filters/network/kafka/kafka_request.h" -#include "source/extensions/filters/network/kafka/parser.h" -#include "source/extensions/filters/network/kafka/tagged_fields.h" + +#include "contrib/kafka/filters/network/source/kafka_request.h" +#include "contrib/kafka/filters/network/source/parser.h" +#include "contrib/kafka/filters/network/source/tagged_fields.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/kafka/kafka_response.h b/contrib/kafka/filters/network/source/kafka_response.h similarity index 95% rename from source/extensions/filters/network/kafka/kafka_response.h rename to contrib/kafka/filters/network/source/kafka_response.h index 805d46defb698..32bc8317f5131 100644 --- 
a/source/extensions/filters/network/kafka/kafka_response.h +++ b/contrib/kafka/filters/network/source/kafka_response.h @@ -1,8 +1,8 @@ #pragma once -#include "source/extensions/filters/network/kafka/external/serialization_composite.h" -#include "source/extensions/filters/network/kafka/serialization.h" -#include "source/extensions/filters/network/kafka/tagged_fields.h" +#include "contrib/kafka/filters/network/source/external/serialization_composite.h" +#include "contrib/kafka/filters/network/source/serialization.h" +#include "contrib/kafka/filters/network/source/tagged_fields.h" namespace Envoy { namespace Extensions { @@ -141,7 +141,6 @@ template class Response : public AbstractResponse { return metadata_ == rhs.metadata_ && data_ == rhs.data_; }; -private: const Data data_; }; diff --git a/source/extensions/filters/network/kafka/kafka_response_parser.cc b/contrib/kafka/filters/network/source/kafka_response_parser.cc similarity index 97% rename from source/extensions/filters/network/kafka/kafka_response_parser.cc rename to contrib/kafka/filters/network/source/kafka_response_parser.cc index dc0a8a9b1f983..44177882a5c08 100644 --- a/source/extensions/filters/network/kafka/kafka_response_parser.cc +++ b/contrib/kafka/filters/network/source/kafka_response_parser.cc @@ -1,4 +1,4 @@ -#include "source/extensions/filters/network/kafka/kafka_response_parser.h" +#include "contrib/kafka/filters/network/source/kafka_response_parser.h" #include "absl/strings/str_cat.h" diff --git a/source/extensions/filters/network/kafka/kafka_response_parser.h b/contrib/kafka/filters/network/source/kafka_response_parser.h similarity index 97% rename from source/extensions/filters/network/kafka/kafka_response_parser.h rename to contrib/kafka/filters/network/source/kafka_response_parser.h index ab3fd2d66b1df..56cf350592ea6 100644 --- a/source/extensions/filters/network/kafka/kafka_response_parser.h +++ b/contrib/kafka/filters/network/source/kafka_response_parser.h @@ -3,9 +3,9 @@ #include 
#include -#include "source/extensions/filters/network/kafka/kafka_response.h" -#include "source/extensions/filters/network/kafka/parser.h" -#include "source/extensions/filters/network/kafka/tagged_fields.h" +#include "contrib/kafka/filters/network/source/kafka_response.h" +#include "contrib/kafka/filters/network/source/parser.h" +#include "contrib/kafka/filters/network/source/tagged_fields.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/kafka/kafka_types.h b/contrib/kafka/filters/network/source/kafka_types.h similarity index 100% rename from source/extensions/filters/network/kafka/kafka_types.h rename to contrib/kafka/filters/network/source/kafka_types.h diff --git a/contrib/kafka/filters/network/source/mesh/BUILD b/contrib/kafka/filters/network/source/mesh/BUILD new file mode 100644 index 0000000000000..fe24168a884b0 --- /dev/null +++ b/contrib/kafka/filters/network/source/mesh/BUILD @@ -0,0 +1,131 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_contrib_package", +) +load("//bazel:envoy_internal.bzl", "envoy_external_dep_path") + +licenses(["notice"]) # Apache 2 + +envoy_contrib_package() + +# Kafka-mesh network filter. 
+ +envoy_cc_library( + name = "filter_lib", + srcs = ["filter.cc"], + hdrs = [ + "filter.h", + ], + tags = ["skip_on_windows"], + deps = [ + ":abstract_command_lib", + ":request_processor_lib", + ":upstream_config_lib", + ":upstream_kafka_facade_lib", + "//contrib/kafka/filters/network/source:kafka_request_codec_lib", + "//contrib/kafka/filters/network/source:kafka_response_codec_lib", + "//envoy/buffer:buffer_interface", + "//envoy/network:connection_interface", + "//envoy/network:filter_interface", + "//source/common/common:assert_lib", + "//source/common/common:minimal_logger_lib", + ], +) + +envoy_cc_library( + name = "request_processor_lib", + srcs = [ + "request_processor.cc", + ], + hdrs = [ + "request_processor.h", + ], + tags = ["skip_on_windows"], + deps = [ + ":abstract_command_lib", + ":upstream_config_lib", + ":upstream_kafka_facade_lib", + "//contrib/kafka/filters/network/source:kafka_request_codec_lib", + "//contrib/kafka/filters/network/source:kafka_request_parser_lib", + "//contrib/kafka/filters/network/source/mesh/command_handlers:api_versions_lib", + "//contrib/kafka/filters/network/source/mesh/command_handlers:metadata_lib", + "//contrib/kafka/filters/network/source/mesh/command_handlers:produce_lib", + "//source/common/common:minimal_logger_lib", + ], +) + +envoy_cc_library( + name = "abstract_command_lib", + srcs = [ + "abstract_command.cc", + ], + hdrs = [ + "abstract_command.h", + ], + tags = ["skip_on_windows"], + deps = [ + "//contrib/kafka/filters/network/source:kafka_response_lib", + "//contrib/kafka/filters/network/source:tagged_fields_lib", + ], +) + +envoy_cc_library( + name = "upstream_kafka_facade_lib", + srcs = [ + "upstream_kafka_facade.cc", + ], + hdrs = [ + "upstream_kafka_facade.h", + ], + tags = ["skip_on_windows"], + deps = [ + ":upstream_config_lib", + ":upstream_kafka_client_impl_lib", + ":upstream_kafka_client_lib", + "//envoy/thread:thread_interface", + "//envoy/thread_local:thread_local_interface", + 
"//source/common/common:minimal_logger_lib", + ], +) + +envoy_cc_library( + name = "upstream_kafka_client_lib", + srcs = [ + ], + hdrs = [ + "upstream_kafka_client.h", + ], + tags = ["skip_on_windows"], + deps = [ + ], +) + +envoy_cc_library( + name = "upstream_kafka_client_impl_lib", + srcs = [ + "upstream_kafka_client_impl.cc", + ], + hdrs = [ + "upstream_kafka_client_impl.h", + ], + tags = ["skip_on_windows"], + deps = [ + ":upstream_kafka_client_lib", + "//envoy/event:dispatcher_interface", + "//source/common/common:minimal_logger_lib", + envoy_external_dep_path("librdkafka"), + ], +) + +envoy_cc_library( + name = "upstream_config_lib", + srcs = [ + ], + hdrs = [ + "upstream_config.h", + ], + tags = ["skip_on_windows"], + deps = [ + ], +) diff --git a/source/extensions/filters/network/kafka/mesh/abstract_command.cc b/contrib/kafka/filters/network/source/mesh/abstract_command.cc similarity index 89% rename from source/extensions/filters/network/kafka/mesh/abstract_command.cc rename to contrib/kafka/filters/network/source/mesh/abstract_command.cc index eab6dbb47df5d..c209331038361 100644 --- a/source/extensions/filters/network/kafka/mesh/abstract_command.cc +++ b/contrib/kafka/filters/network/source/mesh/abstract_command.cc @@ -1,4 +1,4 @@ -#include "source/extensions/filters/network/kafka/mesh/abstract_command.h" +#include "contrib/kafka/filters/network/source/mesh/abstract_command.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/kafka/mesh/abstract_command.h b/contrib/kafka/filters/network/source/mesh/abstract_command.h similarity index 97% rename from source/extensions/filters/network/kafka/mesh/abstract_command.h rename to contrib/kafka/filters/network/source/mesh/abstract_command.h index 40cbb18396f50..e7ee458d1e7dc 100644 --- a/source/extensions/filters/network/kafka/mesh/abstract_command.h +++ b/contrib/kafka/filters/network/source/mesh/abstract_command.h @@ -1,7 +1,8 @@ #pragma once #include 
"source/common/common/logger.h" -#include "source/extensions/filters/network/kafka/kafka_response.h" + +#include "contrib/kafka/filters/network/source/kafka_response.h" namespace Envoy { namespace Extensions { diff --git a/contrib/kafka/filters/network/source/mesh/command_handlers/BUILD b/contrib/kafka/filters/network/source/mesh/command_handlers/BUILD new file mode 100644 index 0000000000000..6891a3c3ea574 --- /dev/null +++ b/contrib/kafka/filters/network/source/mesh/command_handlers/BUILD @@ -0,0 +1,94 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_contrib_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_contrib_package() + +# Handlers for particular Kafka requests that are used by Kafka-mesh filter. + +envoy_cc_library( + name = "produce_lib", + srcs = [ + "produce.cc", + ], + hdrs = [ + "produce.h", + ], + tags = ["skip_on_windows"], + deps = [ + ":produce_outbound_record_lib", + ":produce_record_extractor_lib", + "//contrib/kafka/filters/network/source:kafka_request_parser_lib", + "//contrib/kafka/filters/network/source:kafka_response_parser_lib", + "//contrib/kafka/filters/network/source/mesh:abstract_command_lib", + "//contrib/kafka/filters/network/source/mesh:upstream_kafka_facade_lib", + "//source/common/common:minimal_logger_lib", + ], +) + +envoy_cc_library( + name = "produce_outbound_record_lib", + srcs = [ + ], + hdrs = [ + "produce_outbound_record.h", + ], + tags = ["skip_on_windows"], + deps = [ + ], +) + +envoy_cc_library( + name = "produce_record_extractor_lib", + srcs = [ + "produce_record_extractor.cc", + ], + hdrs = [ + "produce_record_extractor.h", + ], + tags = ["skip_on_windows"], + deps = [ + ":produce_outbound_record_lib", + "//contrib/kafka/filters/network/source:kafka_request_parser_lib", + ], +) + +envoy_cc_library( + name = "metadata_lib", + srcs = [ + "metadata.cc", + ], + hdrs = [ + "metadata.h", + ], + tags = ["skip_on_windows"], + deps = [ + 
"//contrib/kafka/filters/network/source:kafka_request_parser_lib", + "//contrib/kafka/filters/network/source:kafka_response_parser_lib", + "//contrib/kafka/filters/network/source/mesh:abstract_command_lib", + "//contrib/kafka/filters/network/source/mesh:upstream_config_lib", + "//source/common/common:minimal_logger_lib", + ], +) + +envoy_cc_library( + name = "api_versions_lib", + srcs = [ + "api_versions.cc", + ], + hdrs = [ + "api_versions.h", + ], + tags = ["skip_on_windows"], + deps = [ + "//contrib/kafka/filters/network/source:kafka_request_parser_lib", + "//contrib/kafka/filters/network/source:kafka_response_parser_lib", + "//contrib/kafka/filters/network/source:tagged_fields_lib", + "//contrib/kafka/filters/network/source/mesh:abstract_command_lib", + "//source/common/common:minimal_logger_lib", + ], +) diff --git a/contrib/kafka/filters/network/source/mesh/command_handlers/api_versions.cc b/contrib/kafka/filters/network/source/mesh/command_handlers/api_versions.cc new file mode 100644 index 0000000000000..1fa8cfa8f5b82 --- /dev/null +++ b/contrib/kafka/filters/network/source/mesh/command_handlers/api_versions.cc @@ -0,0 +1,54 @@ +#include "contrib/kafka/filters/network/source/mesh/command_handlers/api_versions.h" + +#include "contrib/kafka/filters/network/source/external/requests.h" +#include "contrib/kafka/filters/network/source/external/responses.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { + +// These constants define which versions of requests this "Kafka server" will understand. + +// As we can process only record format 2 (which itself is pretty old coming from Kafka 1.0), we are +// going to handle only produce requests with versions higher than 5. +constexpr int16_t MIN_PRODUCE_SUPPORTED = 5; +constexpr int16_t MAX_PRODUCE_SUPPORTED = PRODUCE_REQUEST_MAX_VERSION; /* Generated value. 
*/ +// Right now we do not want to handle old version 0 request, as it expects us to enumerate all +// topics if list of requested topics is empty. +// Impl note: if filter gains knowledge of upstream cluster topics (e.g. thru admin clients), we +// could decrease this value. +constexpr int16_t MIN_METADATA_SUPPORTED = 1; +constexpr int16_t MAX_METADATA_SUPPORTED = METADATA_REQUEST_MAX_VERSION; /* Generated value. */ + +ApiVersionsRequestHolder::ApiVersionsRequestHolder(AbstractRequestListener& filter, + const RequestHeader request_header) + : BaseInFlightRequest{filter}, request_header_{request_header} {} + +// Api Versions requests are immediately ready for answer (as they do not need to reach upstream). +void ApiVersionsRequestHolder::startProcessing() { notifyFilter(); } + +// Because these requests can be trivially handled, the responses are okay to be sent downstream at +// any time. +bool ApiVersionsRequestHolder::finished() const { return true; } + +AbstractResponseSharedPtr ApiVersionsRequestHolder::computeAnswer() const { + const ResponseMetadata metadata = {request_header_.api_key_, request_header_.api_version_, + request_header_.correlation_id_}; + + const int16_t error_code = 0; + const ApiVersionsResponseKey produce_entry = {PRODUCE_REQUEST_API_KEY, MIN_PRODUCE_SUPPORTED, + MAX_PRODUCE_SUPPORTED}; + const ApiVersionsResponseKey metadata_entry = {METADATA_REQUEST_API_KEY, MIN_METADATA_SUPPORTED, + MAX_METADATA_SUPPORTED}; + const ApiVersionsResponse real_response = {error_code, {produce_entry, metadata_entry}}; + + return std::make_shared>(metadata, real_response); +} + +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/kafka/filters/network/source/mesh/command_handlers/api_versions.h b/contrib/kafka/filters/network/source/mesh/command_handlers/api_versions.h new file mode 100644 index 0000000000000..11344c380aa85 --- /dev/null +++ 
b/contrib/kafka/filters/network/source/mesh/command_handlers/api_versions.h @@ -0,0 +1,36 @@ +#pragma once + +#include "contrib/kafka/filters/network/source/external/requests.h" +#include "contrib/kafka/filters/network/source/mesh/abstract_command.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { + +/** + * Api version requests are the first requests sent by Kafka clients to brokers. + * We send our customized response to fail clients that might be trying to accomplish something more + * than this filter supports. + */ +class ApiVersionsRequestHolder : public BaseInFlightRequest { +public: + ApiVersionsRequestHolder(AbstractRequestListener& filter, const RequestHeader request_header); + + void startProcessing() override; + + bool finished() const override; + + AbstractResponseSharedPtr computeAnswer() const override; + +private: + // Original request header. + const RequestHeader request_header_; +}; + +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/kafka/filters/network/source/mesh/command_handlers/metadata.cc b/contrib/kafka/filters/network/source/mesh/command_handlers/metadata.cc new file mode 100644 index 0000000000000..05b63b451d1fa --- /dev/null +++ b/contrib/kafka/filters/network/source/mesh/command_handlers/metadata.cc @@ -0,0 +1,66 @@ +#include "contrib/kafka/filters/network/source/mesh/command_handlers/metadata.h" + +#include "contrib/kafka/filters/network/source/external/responses.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { + +MetadataRequestHolder::MetadataRequestHolder( + AbstractRequestListener& filter, const UpstreamKafkaConfiguration& configuration, + const std::shared_ptr> request) + : BaseInFlightRequest{filter}, configuration_{configuration}, request_{request} {} + +// Metadata requests are immediately ready for answer (as 
they do not need to reach upstream). +void MetadataRequestHolder::startProcessing() { notifyFilter(); } + +bool MetadataRequestHolder::finished() const { return true; } + +constexpr int32_t ENVOY_BROKER_ID = 0; +constexpr int32_t NO_ERROR = 0; + +// Cornerstone of how the mesh-filter actually works. +// We pretend to be one-node Kafka cluster, with Envoy instance being the only member. +// What means all the Kafka future traffic will go through this instance. +AbstractResponseSharedPtr MetadataRequestHolder::computeAnswer() const { + const auto& header = request_->request_header_; + const ResponseMetadata metadata = {header.api_key_, header.api_version_, header.correlation_id_}; + + const auto advertised_address = configuration_.getAdvertisedAddress(); + MetadataResponseBroker broker = {ENVOY_BROKER_ID, advertised_address.first, + advertised_address.second}; + std::vector response_topics; + if (request_->data_.topics_) { + for (const auto& topic : *(request_->data_.topics_)) { + const std::string& topic_name = topic.name_; + std::vector topic_partitions; + const absl::optional cluster_config = + configuration_.computeClusterConfigForTopic(topic_name); + if (!cluster_config) { + // Someone is requesting topics that are not known to our configuration. + // So we do not attach any metadata, this will cause clients failures downstream as they + // will never be able to get metadata for these topics. + continue; + } + for (int32_t partition_id = 0; partition_id < cluster_config->partition_count_; + ++partition_id) { + // Every partition is hosted by this proxy-broker. 
+ MetadataResponsePartition partition = { + NO_ERROR, partition_id, broker.node_id_, {broker.node_id_}, {broker.node_id_}}; + topic_partitions.push_back(partition); + } + MetadataResponseTopic response_topic = {NO_ERROR, topic_name, false, topic_partitions}; + response_topics.push_back(response_topic); + } + } + MetadataResponse data = {{broker}, broker.node_id_, response_topics}; + return std::make_shared>(metadata, data); +} + +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/kafka/filters/network/source/mesh/command_handlers/metadata.h b/contrib/kafka/filters/network/source/mesh/command_handlers/metadata.h new file mode 100644 index 0000000000000..97e7801aa052c --- /dev/null +++ b/contrib/kafka/filters/network/source/mesh/command_handlers/metadata.h @@ -0,0 +1,37 @@ +#pragma once + +#include "contrib/kafka/filters/network/source/external/requests.h" +#include "contrib/kafka/filters/network/source/mesh/abstract_command.h" +#include "contrib/kafka/filters/network/source/mesh/upstream_config.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { + +class MetadataRequestHolder : public BaseInFlightRequest { +public: + MetadataRequestHolder(AbstractRequestListener& filter, + const UpstreamKafkaConfiguration& configuration, + const std::shared_ptr> request); + + void startProcessing() override; + + bool finished() const override; + + AbstractResponseSharedPtr computeAnswer() const override; + +private: + // Configuration used to provide data for response. + const UpstreamKafkaConfiguration& configuration_; + + // Original request. 
+ const std::shared_ptr> request_; +}; + +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/kafka/filters/network/source/mesh/command_handlers/produce.cc b/contrib/kafka/filters/network/source/mesh/command_handlers/produce.cc new file mode 100644 index 0000000000000..e2ed06fdbb17e --- /dev/null +++ b/contrib/kafka/filters/network/source/mesh/command_handlers/produce.cc @@ -0,0 +1,116 @@ +#include "contrib/kafka/filters/network/source/mesh/command_handlers/produce.h" + +#include "contrib/kafka/filters/network/source/external/responses.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { + +constexpr static int16_t NO_ERROR = 0; + +ProduceRequestHolder::ProduceRequestHolder(AbstractRequestListener& filter, + UpstreamKafkaFacade& kafka_facade, + const std::shared_ptr> request) + : ProduceRequestHolder{filter, kafka_facade, RecordExtractorImpl{}, request} {}; + +ProduceRequestHolder::ProduceRequestHolder(AbstractRequestListener& filter, + UpstreamKafkaFacade& kafka_facade, + const RecordExtractor& record_extractor, + const std::shared_ptr> request) + : BaseInFlightRequest{filter}, kafka_facade_{kafka_facade}, request_{request} { + outbound_records_ = record_extractor.extractRecords(request_->data_.topics_); + expected_responses_ = outbound_records_.size(); +} + +void ProduceRequestHolder::startProcessing() { + // Main part of the proxy: for each outbound record we get the appropriate sink (effectively a + // facade for upstream Kafka cluster), and send the record to it. + for (const auto& outbound_record : outbound_records_) { + KafkaProducer& producer = kafka_facade_.getProducerForTopic(outbound_record.topic_); + // We need to provide our object as first argument, as we will want to be notified when the + // delivery finishes. 
+ producer.send(shared_from_this(), outbound_record.topic_, outbound_record.partition_, + outbound_record.key_, outbound_record.value_); + } + // Corner case handling: + // If we ever receive produce request without records, we need to notify the filter we are ready, + // because otherwise no notification will ever come from the real Kafka producer. + if (finished()) { + notifyFilter(); + } +} + +bool ProduceRequestHolder::finished() const { return 0 == expected_responses_; } + +// Find a record that matches provided delivery confirmation coming from Kafka producer. +// If all the records got their delivery data filled in, we are done, and can notify the origin +// filter. +bool ProduceRequestHolder::accept(const DeliveryMemento& memento) { + for (auto& outbound_record : outbound_records_) { + if (outbound_record.value_.data() == memento.data_) { + // We have matched the downstream request that matches our confirmation from upstream Kafka. + outbound_record.error_code_ = memento.error_code_; + outbound_record.saved_offset_ = memento.offset_; + --expected_responses_; + if (finished()) { + // All elements had their responses matched. + ENVOY_LOG(trace, "All deliveries finished for produce request {}", + request_->request_header_.correlation_id_); + notifyFilter(); + } + return true; + } + } + return false; +} + +AbstractResponseSharedPtr ProduceRequestHolder::computeAnswer() const { + + // Header. + const RequestHeader& rh = request_->request_header_; + ResponseMetadata metadata = {rh.api_key_, rh.api_version_, rh.correlation_id_}; + + // Real answer. 
+ using ErrorCodeAndOffset = std::pair; + std::map> topic_to_partition_responses; + for (const auto& outbound_record : outbound_records_) { + auto& partition_map = topic_to_partition_responses[outbound_record.topic_]; + auto it = partition_map.find(outbound_record.partition_); + if (it == partition_map.end()) { + partition_map[outbound_record.partition_] = {outbound_record.error_code_, + outbound_record.saved_offset_}; + } else { + // Proxy logic - aggregating multiple upstream answers into single downstream answer. + // Let's fail if anything fails, otherwise use the lowest offset (like Kafka would have done). + ErrorCodeAndOffset& curr = it->second; + if (NO_ERROR == curr.first) { + curr.first = outbound_record.error_code_; + curr.second = std::min(curr.second, outbound_record.saved_offset_); + } + } + } + + std::vector topic_responses; + for (const auto& topic_entry : topic_to_partition_responses) { + std::vector partition_responses; + for (const auto& partition_entry : topic_entry.second) { + const int32_t& partition = partition_entry.first; + const int16_t& error_code = partition_entry.second.first; + const int64_t& offset = partition_entry.second.second; + partition_responses.emplace_back(partition, error_code, offset); + } + const std::string& topic = topic_entry.first; + topic_responses.emplace_back(topic, partition_responses); + } + + ProduceResponse data = {topic_responses, 0}; + return std::make_shared>(metadata, data); +} + +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/kafka/filters/network/source/mesh/command_handlers/produce.h b/contrib/kafka/filters/network/source/mesh/command_handlers/produce.h new file mode 100644 index 0000000000000..04781366ea90f --- /dev/null +++ b/contrib/kafka/filters/network/source/mesh/command_handlers/produce.h @@ -0,0 +1,95 @@ +#pragma once + +#include "contrib/kafka/filters/network/source/external/requests.h" +#include 
"contrib/kafka/filters/network/source/mesh/abstract_command.h" +#include "contrib/kafka/filters/network/source/mesh/command_handlers/produce_outbound_record.h" +#include "contrib/kafka/filters/network/source/mesh/command_handlers/produce_record_extractor.h" +#include "contrib/kafka/filters/network/source/mesh/upstream_kafka_client.h" +#include "contrib/kafka/filters/network/source/mesh/upstream_kafka_facade.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { + +/** + * Kafka 'Produce' request, that is aimed at particular cluster. + * A single Produce request coming from downstream can map into multiple entries, + * as the topics can be hosted on different clusters. + * + * These requests stored in 2 places: this filter (request's origin) and in RichKafkaProducer + * instances (to match pure-Kafka confirmations to the requests). + * + * +--------------+ + * |<>+--------+ + * +-+------------+ | + * | | + * | | + * | | + * +---------------+ +-v---------------+ | + * |KafkaMeshFilter+--+ +--+RichKafkaProducer| | + * +-^-------------+ | | +-----------------+ | + * | | | | + * | | | | + * | | | | + * | +--------v-v---------+ | + * +-------+ProduceRequestHolder|----------+ | + * | | + * when-finished> | | | + * +---------v----------+ | | + * |PartitionProduceData| | | + * +---------^----------+ | | + * | | | + * +-----------------+----------------+ | | + * | | | | | + * +-----+--------+ +------+-------+ +------+----v--+ | + * |OutboundRecord| |OutboundRecord| |OutboundRecord<--+ + * +--------------+ +--------------+ +--------------+ + */ +class ProduceRequestHolder : public BaseInFlightRequest, + public ProduceFinishCb, + public std::enable_shared_from_this { +public: + ProduceRequestHolder(AbstractRequestListener& filter, UpstreamKafkaFacade& kafka_facade, + const std::shared_ptr> request); + + // Visible for testing. 
+ ProduceRequestHolder(AbstractRequestListener& filter, UpstreamKafkaFacade& kafka_facade, + const RecordExtractor& record_extractor, + const std::shared_ptr> request); + + // AbstractInFlightRequest + void startProcessing() override; + + // AbstractInFlightRequest + bool finished() const override; + + // AbstractInFlightRequest + AbstractResponseSharedPtr computeAnswer() const override; + + // ProduceFinishCb + bool accept(const DeliveryMemento& memento) override; + +private: + // Access to Kafka producers pointing to upstream Kafka clusters. + UpstreamKafkaFacade& kafka_facade_; + + // Original request. + const std::shared_ptr> request_; + + // How many responses from Kafka Producer handling our request we still expect. + // This value decreases to 0 as we get confirmations from Kafka (successful or not). + int expected_responses_; + + // Real records extracted out of request. + std::vector outbound_records_; +}; + +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/kafka/filters/network/source/mesh/command_handlers/produce_outbound_record.h b/contrib/kafka/filters/network/source/mesh/command_handlers/produce_outbound_record.h new file mode 100644 index 0000000000000..4174e3dea7e13 --- /dev/null +++ b/contrib/kafka/filters/network/source/mesh/command_handlers/produce_outbound_record.h @@ -0,0 +1,36 @@ +#pragma once + +#include + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { + +// Binds a single inbound record from Kafka client with its delivery information. +struct OutboundRecord { + + // These fields were received from downstream. + const std::string topic_; + const int32_t partition_; + const absl::string_view key_; + const absl::string_view value_; + + // These fields will get updated when delivery to upstream Kafka cluster finishes. 
+ int16_t error_code_; + uint32_t saved_offset_; + + OutboundRecord(const std::string& topic, const int32_t partition, const absl::string_view key, + const absl::string_view value) + : topic_{topic}, partition_{partition}, key_{key}, value_{value}, error_code_{0}, + saved_offset_{0} {}; +}; + +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/kafka/filters/network/source/mesh/command_handlers/produce_record_extractor.cc b/contrib/kafka/filters/network/source/mesh/command_handlers/produce_record_extractor.cc new file mode 100644 index 0000000000000..3c98dc4885cf9 --- /dev/null +++ b/contrib/kafka/filters/network/source/mesh/command_handlers/produce_record_extractor.cc @@ -0,0 +1,208 @@ +#include "contrib/kafka/filters/network/source/mesh/command_handlers/produce_record_extractor.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { + +std::vector +RecordExtractorImpl::extractRecords(const std::vector& data) const { + std::vector result; + for (const auto& topic_data : data) { + for (const auto& partition_data : topic_data.partitions_) { + // Kafka protocol allows nullable data. + if (partition_data.records_) { + const auto topic_result = extractPartitionRecords( + topic_data.name_, partition_data.partition_index_, *(partition_data.records_)); + std::copy(topic_result.begin(), topic_result.end(), std::back_inserter(result)); + } + } + } + return result; +} + +// Fields common to any record batch payload. +// See: +// https://github.com/apache/kafka/blob/2.4.1/clients/src/main/java/org/apache/kafka/common/record/DefaultRecordBatch.java#L46 +constexpr unsigned int RECORD_BATCH_COMMON_FIELDS_SIZE = /* BaseOffset */ sizeof(int64_t) + + /* Length */ sizeof(int32_t) + + /* PartitionLeaderEpoch */ sizeof(int32_t); + +// Magic format introduced around Kafka 1.0.0 and still used with Kafka 2.4. 
+// We can extract records out of record batches that use this magic. +constexpr int8_t SUPPORTED_MAGIC = 2; + +// Reference implementation: +// https://github.com/apache/kafka/blob/2.4.1/clients/src/main/java/org/apache/kafka/common/record/DefaultRecordBatch.java#L443 +std::vector RecordExtractorImpl::extractPartitionRecords(const std::string& topic, + const int32_t partition, + const Bytes& bytes) const { + + absl::string_view data = {reinterpret_cast(bytes.data()), bytes.size()}; + + // Let's skip these common fields, because we are not using them. + if (data.length() < RECORD_BATCH_COMMON_FIELDS_SIZE) { + throw EnvoyException(fmt::format("record batch for [{}-{}] is too short (no common fields): {}", + topic, partition, data.length())); + } + data = {data.data() + RECORD_BATCH_COMMON_FIELDS_SIZE, + data.length() - RECORD_BATCH_COMMON_FIELDS_SIZE}; + + // Extract magic - it what is the format of records present in the bytes provided. + Int8Deserializer magic_deserializer; + magic_deserializer.feed(data); + if (!magic_deserializer.ready()) { + throw EnvoyException( + fmt::format("magic byte is not present in record batch for [{}-{}]", topic, partition)); + } + + // Old client sending old magic, or Apache Kafka introducing new magic. + const int8_t magic = magic_deserializer.get(); + if (SUPPORTED_MAGIC != magic) { + throw EnvoyException(fmt::format("unknown magic value in record batch for [{}-{}]: {}", topic, + partition, magic)); + } + + // We have received a record batch with good magic. + return processRecordBatch(topic, partition, data); +} + +// Record batch fields we are going to ignore (because we rip it up and send its contents). 
+// See: +// https://github.com/apache/kafka/blob/2.4.1/clients/src/main/java/org/apache/kafka/common/record/DefaultRecordBatch.java#L50 +// and: +// https://github.com/apache/kafka/blob/2.4.1/clients/src/main/java/org/apache/kafka/common/record/DefaultRecordBatch.java#L471 +constexpr unsigned int IGNORED_FIELDS_SIZE = + /* CRC */ sizeof(int32_t) + /* Attributes */ sizeof(int16_t) + + /* LastOffsetDelta */ sizeof(int32_t) + /* FirstTimestamp */ sizeof(int64_t) + + /* MaxTimestamp */ sizeof(int64_t) + /* ProducerId */ sizeof(int64_t) + + /* ProducerEpoch */ sizeof(int16_t) + /* BaseSequence */ sizeof(int32_t) + + /* RecordCount */ sizeof(int32_t); + +std::vector RecordExtractorImpl::processRecordBatch(const std::string& topic, + const int32_t partition, + absl::string_view data) const { + + if (data.length() < IGNORED_FIELDS_SIZE) { + throw EnvoyException( + fmt::format("record batch for [{}-{}] is too short (no attribute fields): {}", topic, + partition, data.length())); + } + data = {data.data() + IGNORED_FIELDS_SIZE, data.length() - IGNORED_FIELDS_SIZE}; + + // We have managed to consume all the fancy bytes, now it's time to get to records. 
+ std::vector result; + while (!data.empty()) { + const OutboundRecord record = extractRecord(topic, partition, data); + result.push_back(record); + } + return result; +} + +// Reference implementation: +// https://github.com/apache/kafka/blob/2.4.1/clients/src/main/java/org/apache/kafka/common/record/DefaultRecord.java#L179 +OutboundRecord RecordExtractorImpl::extractRecord(const std::string& topic, const int32_t partition, + absl::string_view& data) const { + + VarInt32Deserializer length; + length.feed(data); + if (!length.ready()) { + throw EnvoyException( + fmt::format("record for [{}-{}] is too short (no length)", topic, partition)); + } + const int32_t len = length.get(); + if (len < 0) { + throw EnvoyException( + fmt::format("record for [{}-{}] has invalid length: {}", topic, partition, len)); + } + if (static_cast(len) > data.length()) { + throw EnvoyException(fmt::format("record for [{}-{}] is too short (not enough bytes provided)", + topic, partition)); + } + + const absl::string_view expected_end_of_record = {data.data() + len, data.length() - len}; + + // We throw away the following batch fields: attributes, timestamp delta, offset delta (cannot do + // an easy jump, as some are variable-length). + Int8Deserializer attributes; + attributes.feed(data); + VarInt64Deserializer tsDelta; + tsDelta.feed(data); + VarUInt32Deserializer offsetDelta; + offsetDelta.feed(data); + if (!attributes.ready() || !tsDelta.ready() || !offsetDelta.ready()) { + throw EnvoyException( + fmt::format("attributes not present in record for [{}-{}]", topic, partition)); + } + + // Record key and value. + const absl::string_view key = extractByteArray(data); + const absl::string_view value = extractByteArray(data); + + // Headers. 
+ VarInt32Deserializer headers_count_deserializer; + headers_count_deserializer.feed(data); + if (!headers_count_deserializer.ready()) { + throw EnvoyException( + fmt::format("header count not present in record for [{}-{}]", topic, partition)); + } + const int32_t headers_count = headers_count_deserializer.get(); + if (headers_count < 0) { + throw EnvoyException(fmt::format("invalid header count in record for [{}-{}]: {}", topic, + partition, headers_count)); + } + for (int32_t i = 0; i < headers_count; ++i) { + // For now, we ignore headers. + extractByteArray(data); // Header key. + extractByteArray(data); // Header value. + } + + if (data == expected_end_of_record) { + // We have consumed everything nicely. + return OutboundRecord{topic, partition, key, value}; + } else { + // Bad data - there are bytes left. + throw EnvoyException(fmt::format("data left after consuming record for [{}-{}]: {}", topic, + partition, data.length())); + } +} + +absl::string_view RecordExtractorImpl::extractByteArray(absl::string_view& input) { + + // Get the length. + VarInt32Deserializer length_deserializer; + length_deserializer.feed(input); + if (!length_deserializer.ready()) { + throw EnvoyException("byte array length not present"); + } + const int32_t length = length_deserializer.get(); + + // Length can be -1 (null value was published by client). + if (-1 == length) { + return {}; + } + + // Otherwise, length cannot be negative. + if (length < 0) { + throw EnvoyException(fmt::format("byte array length less than -1: {}", length)); + } + + // Underflow handling. + if (static_cast(length) > input.size()) { + throw EnvoyException( + fmt::format("byte array length larger than data provided: {} vs {}", length, input.size())); + } + + // We have enough data to return it. 
+ const absl::string_view result = {input.data(), + static_cast(length)}; + input = {input.data() + length, input.length() - length}; + return result; +} + +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/kafka/filters/network/source/mesh/command_handlers/produce_record_extractor.h b/contrib/kafka/filters/network/source/mesh/command_handlers/produce_record_extractor.h new file mode 100644 index 0000000000000..59c6e7380e4fa --- /dev/null +++ b/contrib/kafka/filters/network/source/mesh/command_handlers/produce_record_extractor.h @@ -0,0 +1,53 @@ +#pragma once + +#include "contrib/kafka/filters/network/source/external/requests.h" +#include "contrib/kafka/filters/network/source/mesh/command_handlers/produce_outbound_record.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { + +/** + * Dependency injection class responsible for extracting records out of produce request's contents. + */ +class RecordExtractor { +public: + virtual ~RecordExtractor() = default; + + virtual std::vector + extractRecords(const std::vector& data) const PURE; +}; + +/** + * Proper implementation of record extractor, capable of parsing V2 record set. + * Reference: https://kafka.apache.org/24/documentation/#messageformat + */ +class RecordExtractorImpl : public RecordExtractor { +public: + // RecordExtractor + std::vector + extractRecords(const std::vector& data) const override; + + // Helper function to get the data (such as key, value) out of given input, as most of the + // interesting fields in records are kept as variable-encoded length and following bytes. 
+ static absl::string_view extractByteArray(absl::string_view& input); + +private: + std::vector extractPartitionRecords(const std::string& topic, + const int32_t partition, + const Bytes& records) const; + + std::vector processRecordBatch(const std::string& topic, const int32_t partition, + absl::string_view data) const; + + OutboundRecord extractRecord(const std::string& topic, const int32_t partition, + absl::string_view& data) const; +}; + +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/kafka/mesh/filter.cc b/contrib/kafka/filters/network/source/mesh/filter.cc similarity index 84% rename from source/extensions/filters/network/kafka/mesh/filter.cc rename to contrib/kafka/filters/network/source/mesh/filter.cc index f250046c849f7..10c0ef7961625 100644 --- a/source/extensions/filters/network/kafka/mesh/filter.cc +++ b/contrib/kafka/filters/network/source/mesh/filter.cc @@ -1,11 +1,12 @@ -#include "source/extensions/filters/network/kafka/mesh/filter.h" +#include "contrib/kafka/filters/network/source/mesh/filter.h" #include "envoy/network/connection.h" #include "source/common/buffer/buffer_impl.h" -#include "source/extensions/filters/network/kafka/external/requests.h" -#include "source/extensions/filters/network/kafka/external/responses.h" -#include "source/extensions/filters/network/kafka/response_codec.h" + +#include "contrib/kafka/filters/network/source/external/requests.h" +#include "contrib/kafka/filters/network/source/external/responses.h" +#include "contrib/kafka/filters/network/source/response_codec.h" namespace Envoy { namespace Extensions { @@ -13,6 +14,11 @@ namespace NetworkFilters { namespace Kafka { namespace Mesh { +KafkaMeshFilter::KafkaMeshFilter(const UpstreamKafkaConfiguration& configuration, + UpstreamKafkaFacade& upstream_kafka_facade) + : KafkaMeshFilter{std::make_shared(std::vector( + {std::make_shared(*this, configuration, 
upstream_kafka_facade)}))} {} + KafkaMeshFilter::KafkaMeshFilter(RequestDecoderSharedPtr request_decoder) : request_decoder_{request_decoder} {} diff --git a/contrib/kafka/filters/network/source/mesh/filter.h b/contrib/kafka/filters/network/source/mesh/filter.h new file mode 100644 index 0000000000000..a6b4ec80cdd4f --- /dev/null +++ b/contrib/kafka/filters/network/source/mesh/filter.h @@ -0,0 +1,99 @@ +#pragma once + +#include "envoy/common/time.h" +#include "envoy/network/filter.h" +#include "envoy/stats/scope.h" + +#include "source/common/common/logger.h" + +#include "contrib/kafka/filters/network/source/external/requests.h" +#include "contrib/kafka/filters/network/source/mesh/abstract_command.h" +#include "contrib/kafka/filters/network/source/mesh/request_processor.h" +#include "contrib/kafka/filters/network/source/mesh/upstream_config.h" +#include "contrib/kafka/filters/network/source/mesh/upstream_kafka_facade.h" +#include "contrib/kafka/filters/network/source/request_codec.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { + +/** + * Main entry point. + * Decoded request bytes are passed to processor, that calls us back with enriched request. + * Request then gets invoked to starts its processing. + * Filter is going to maintain a list of in-flight-request so it can send responses when they + * finish. 
+ * + * + * +----------------+ +-----------------------+ + * |RequestProcessor+----------------->AbstractInFlightRequest| + * +-------^--------+ +----^-----^------------+ + * | | | + * | | +-+------------------+ + * +-------+-------+ | |ProduceRequestHolder| + * |KafkaMeshFilter+-----------------------+ +-+------------------+ + * +-------+-------+ | + * | | + * | | + * +-------v-----------+ | + * |UpstreamKafkaFacade| |(for callback when finished) + * +-------+-----------+ | + * | | + * | | + * +-------v--------------+ +--------------v--+ +-----------------+ + * |<> +------->RichKafkaProducer+--->><> | + * |ThreadLocalKafkaFacade| +-----------------+ |RdKafka::Producer| + * +----------------------+ +-----------------+ + **/ +class KafkaMeshFilter : public Network::ReadFilter, + public Network::ConnectionCallbacks, + public AbstractRequestListener, + private Logger::Loggable { +public: + // Main constructor. + KafkaMeshFilter(const UpstreamKafkaConfiguration& configuration, + UpstreamKafkaFacade& upstream_kafka_facade); + + // Visible for testing. + KafkaMeshFilter(RequestDecoderSharedPtr request_decoder); + + // Non-trivial. See 'abandonAllInFlightRequests'. + ~KafkaMeshFilter() override; + + // Network::ReadFilter + Network::FilterStatus onNewConnection() override; + void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override; + Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override; + + // Network::ConnectionCallbacks + void onEvent(Network::ConnectionEvent event) override; + void onAboveWriteBufferHighWatermark() override; + void onBelowWriteBufferLowWatermark() override; + + // AbstractRequestListener + void onRequest(InFlightRequestSharedPtr request) override; + void onRequestReadyForAnswer() override; + + std::list& getRequestsInFlightForTest(); + +private: + // Helper method invoked when connection gets dropped. 
+ // Because filter can be destroyed before confirmations from Kafka are received, we are just going + // to mark related requests as abandoned, so they do not attempt to reference this filter anymore. + // Impl note: this is similar to what Redis filter does. + void abandonAllInFlightRequests(); + + const RequestDecoderSharedPtr request_decoder_; + + Network::ReadFilterCallbacks* read_filter_callbacks_; + + std::list requests_in_flight_; +}; + +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/kafka/filters/network/source/mesh/request_processor.cc b/contrib/kafka/filters/network/source/mesh/request_processor.cc new file mode 100644 index 0000000000000..0391cdaadcf6d --- /dev/null +++ b/contrib/kafka/filters/network/source/mesh/request_processor.cc @@ -0,0 +1,69 @@ +#include "contrib/kafka/filters/network/source/mesh/request_processor.h" + +#include "envoy/common/exception.h" + +#include "contrib/kafka/filters/network/source/mesh/command_handlers/api_versions.h" +#include "contrib/kafka/filters/network/source/mesh/command_handlers/metadata.h" +#include "contrib/kafka/filters/network/source/mesh/command_handlers/produce.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { + +RequestProcessor::RequestProcessor(AbstractRequestListener& origin, + const UpstreamKafkaConfiguration& configuration, + UpstreamKafkaFacade& upstream_kafka_facade) + : origin_{origin}, configuration_{configuration}, upstream_kafka_facade_{ + upstream_kafka_facade} {} + +// Helper function. Throws a nice message. Filter will react by closing the connection. 
+static void throwOnUnsupportedRequest(const std::string& reason, const RequestHeader& header) { + throw EnvoyException(absl::StrCat(reason, " Kafka request (key=", header.api_key_, ", version=", + header.api_version_, ", cid=", header.correlation_id_)); +} + +void RequestProcessor::onMessage(AbstractRequestSharedPtr arg) { + switch (arg->request_header_.api_key_) { + case PRODUCE_REQUEST_API_KEY: + process(std::dynamic_pointer_cast>(arg)); + break; + case METADATA_REQUEST_API_KEY: + process(std::dynamic_pointer_cast>(arg)); + break; + case API_VERSIONS_REQUEST_API_KEY: + process(std::dynamic_pointer_cast>(arg)); + break; + default: + // Client sent a request we cannot handle right now. + throwOnUnsupportedRequest("unsupported (bad client API invoked?)", arg->request_header_); + break; + } // switch +} + +void RequestProcessor::process(const std::shared_ptr> request) const { + auto res = std::make_shared(origin_, upstream_kafka_facade_, request); + origin_.onRequest(res); +} + +void RequestProcessor::process(const std::shared_ptr> request) const { + auto res = std::make_shared(origin_, configuration_, request); + origin_.onRequest(res); +} + +void RequestProcessor::process(const std::shared_ptr> request) const { + auto res = std::make_shared(origin_, request->request_header_); + origin_.onRequest(res); +} + +// We got something that the parser could not handle. 
+void RequestProcessor::onFailedParse(RequestParseFailureSharedPtr arg) { + throwOnUnsupportedRequest("unknown", arg->request_header_); +} + +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/kafka/filters/network/source/mesh/request_processor.h b/contrib/kafka/filters/network/source/mesh/request_processor.h new file mode 100644 index 0000000000000..b21a69ac22488 --- /dev/null +++ b/contrib/kafka/filters/network/source/mesh/request_processor.h @@ -0,0 +1,43 @@ +#pragma once + +#include "source/common/common/logger.h" + +#include "contrib/kafka/filters/network/source/external/requests.h" +#include "contrib/kafka/filters/network/source/mesh/abstract_command.h" +#include "contrib/kafka/filters/network/source/mesh/upstream_config.h" +#include "contrib/kafka/filters/network/source/mesh/upstream_kafka_facade.h" +#include "contrib/kafka/filters/network/source/request_codec.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { + +/** + * Processes (enriches) incoming requests and passes it back to origin. 
+ */ +class RequestProcessor : public RequestCallback, private Logger::Loggable { +public: + RequestProcessor(AbstractRequestListener& origin, const UpstreamKafkaConfiguration& configuration, + UpstreamKafkaFacade& upstream_kafka_facade); + + // RequestCallback + void onMessage(AbstractRequestSharedPtr arg) override; + void onFailedParse(RequestParseFailureSharedPtr) override; + +private: + void process(const std::shared_ptr> request) const; + void process(const std::shared_ptr> request) const; + void process(const std::shared_ptr> request) const; + + AbstractRequestListener& origin_; + const UpstreamKafkaConfiguration& configuration_; + UpstreamKafkaFacade& upstream_kafka_facade_; +}; + +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/kafka/filters/network/source/mesh/upstream_config.h b/contrib/kafka/filters/network/source/mesh/upstream_config.h new file mode 100644 index 0000000000000..00e3e7faf32da --- /dev/null +++ b/contrib/kafka/filters/network/source/mesh/upstream_config.h @@ -0,0 +1,56 @@ +#pragma once + +#include +#include +#include +#include + +#include "envoy/common/pure.h" + +#include "absl/types/optional.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { + +// Minor helper structure that contains information about upstream Kafka clusters. +struct ClusterConfig { + + // Cluster name, as it appears in configuration input. + std::string name_; + + // How many partitions do we expect for every one of the topics present in given upstream cluster. + // Impl note: this could be replaced with creating (shared?) AdminClient and having it reach out + // upstream to get configuration (or we could just send a correct request via codec). The response + // would need to be cached (as this data is frequently requested). 
+ int32_t partition_count_; + + // The configuration that will be passed to upstream client for given cluster. + // This allows us to reference different clusters with different configs (e.g. linger.ms). + // This map always contains entry with key 'bootstrap.servers', as this is the only mandatory + // producer property. + std::map upstream_producer_properties_; +}; + +/** + * Keeps the configuration related to upstream Kafka clusters. + * Impl note: current matching from topic to cluster is based on prefix matching but more complex + * rules could be added. + */ +class UpstreamKafkaConfiguration { +public: + virtual ~UpstreamKafkaConfiguration() = default; + virtual absl::optional + computeClusterConfigForTopic(const std::string& topic) const PURE; + virtual std::pair getAdvertisedAddress() const PURE; +}; + +using UpstreamKafkaConfigurationSharedPtr = std::shared_ptr; + +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/kafka/filters/network/source/mesh/upstream_kafka_client.h b/contrib/kafka/filters/network/source/mesh/upstream_kafka_client.h new file mode 100644 index 0000000000000..24e9b36efdc65 --- /dev/null +++ b/contrib/kafka/filters/network/source/mesh/upstream_kafka_client.h @@ -0,0 +1,86 @@ +#pragma once + +#include +#include + +#include "envoy/common/pure.h" + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { + +// Trivial memento that keeps the information about how given request was delivered: +// in case of success this means offset (if acks > 0), or error code. +struct DeliveryMemento { + + // Pointer to byte array that was passed to Kafka producer. + // We use this to tell apart messages. + // Important: we do not free this memory, it's still part of the 'ProduceRequestHandler' object. 
+ // Future work: adopt Kafka's opaque-pointer functionality so we use less memory instead of + // keeping whole payload until we receive a confirmation. + const void* data_; + + // Kafka producer error code. + const int32_t error_code_; + + // Offset (only meaningful if error code is equal to 0). + const int64_t offset_; +}; + +// Callback for objects that want to be notified that record delivery has been finished. +class ProduceFinishCb { +public: + virtual ~ProduceFinishCb() = default; + + // Attempt to process this delivery. + // @returns true if given callback is related to this delivery + virtual bool accept(const DeliveryMemento& memento) PURE; +}; + +using ProduceFinishCbSharedPtr = std::shared_ptr; + +/** + * Filter facing interface. + * A thing that takes records and sends them to upstream Kafka. + */ +class KafkaProducer { +public: + virtual ~KafkaProducer() = default; + + /* + * Sends given record (key, value) to Kafka (topic, partition). + * When delivery is finished, it notifies the callback provided with corresponding delivery data + * (error code, offset). + * + * @param origin origin of payload to be notified when delivery finishes. + * @param topic Kafka topic. + * @param partition Kafka partition (as clients do partitioning, we just reuse what downstream + * gave us). + * @param key Kafka message key. + * @param value Kafka message value. + */ + virtual void send(const ProduceFinishCbSharedPtr origin, const std::string& topic, + const int32_t partition, const absl::string_view key, + const absl::string_view value) PURE; + + // Impl leakage: real implementations of Kafka Producer need to stop a monitoring thread, then + // they can close the producer. Because the polling thread should not be interrupted, we just mark + // it as finished, and it's going to notice that change on the next iteration. 
+ // Theoretically we do not need to do this and leave it all to destructor, but then closing N + // producers would require doing that in sequence, while we can optimize it somewhat (so we just + // wait for the slowest one). + // See https://github.com/edenhill/librdkafka/issues/2972 + virtual void markFinished() PURE; +}; + +using KafkaProducerPtr = std::unique_ptr; + +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/kafka/filters/network/source/mesh/upstream_kafka_client_impl.cc b/contrib/kafka/filters/network/source/mesh/upstream_kafka_client_impl.cc new file mode 100644 index 0000000000000..fd43b61a2cf2b --- /dev/null +++ b/contrib/kafka/filters/network/source/mesh/upstream_kafka_client_impl.cc @@ -0,0 +1,152 @@ +#include "contrib/kafka/filters/network/source/mesh/upstream_kafka_client_impl.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { + +class LibRdKafkaUtilsImpl : public LibRdKafkaUtils { + + // LibRdKafkaUtils + RdKafka::Conf::ConfResult setConfProperty(RdKafka::Conf& conf, const std::string& name, + const std::string& value, + std::string& errstr) const override { + return conf.set(name, value, errstr); + } + + // LibRdKafkaUtils + RdKafka::Conf::ConfResult setConfDeliveryCallback(RdKafka::Conf& conf, + RdKafka::DeliveryReportCb* dr_cb, + std::string& errstr) const override { + return conf.set("dr_cb", dr_cb, errstr); + } + + // LibRdKafkaUtils + std::unique_ptr createProducer(RdKafka::Conf* conf, + std::string& errstr) const override { + return std::unique_ptr(RdKafka::Producer::create(conf, errstr)); + } +}; + +RichKafkaProducer::RichKafkaProducer(Event::Dispatcher& dispatcher, + Thread::ThreadFactory& thread_factory, + const RawKafkaProducerConfig& configuration) + : RichKafkaProducer(dispatcher, thread_factory, configuration, LibRdKafkaUtilsImpl{}){}; + 
+RichKafkaProducer::RichKafkaProducer(Event::Dispatcher& dispatcher, + Thread::ThreadFactory& thread_factory, + const RawKafkaProducerConfig& configuration, + const LibRdKafkaUtils& utils) + : dispatcher_{dispatcher} { + + // Create producer configuration object. + std::unique_ptr conf = + std::unique_ptr(RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL)); + std::string errstr; + + // Setup producer custom properties. + for (const auto& e : configuration) { + if (utils.setConfProperty(*conf, e.first, e.second, errstr) != RdKafka::Conf::CONF_OK) { + throw EnvoyException(absl::StrCat("Could not set producer property [", e.first, "] to [", + e.second, "]:", errstr)); + } + } + + // Setup callback (this callback is going to be invoked in dedicated monitoring thread). + if (utils.setConfDeliveryCallback(*conf, this, errstr) != RdKafka::Conf::CONF_OK) { + throw EnvoyException(absl::StrCat("Could not set producer callback:", errstr)); + } + + // Finally, we create the producer. + producer_ = utils.createProducer(conf.get(), errstr); + if (!producer_) { + throw EnvoyException(absl::StrCat("Could not create producer:", errstr)); + } + + // Start the monitoring thread. + poller_thread_active_ = true; + std::function thread_routine = [this]() -> void { checkDeliveryReports(); }; + poller_thread_ = thread_factory.createThread(thread_routine); +} + +RichKafkaProducer::~RichKafkaProducer() { + ENVOY_LOG(debug, "Shutting down worker thread"); + poller_thread_active_ = false; // This should never be needed, as we call 'markFinished' earlier. + poller_thread_->join(); + ENVOY_LOG(debug, "Worker thread shut down successfully"); +} + +void RichKafkaProducer::markFinished() { poller_thread_active_ = false; } + +void RichKafkaProducer::send(const ProduceFinishCbSharedPtr origin, const std::string& topic, + const int32_t partition, const absl::string_view key, + const absl::string_view value) { + { + void* value_data = const_cast(value.data()); // Needed for Kafka API. 
+ // Data is a pointer into request internals, and it is going to be managed by + // ProduceRequestHolder lifecycle. So we are not going to use any of librdkafka's memory + // management. + const int flags = 0; + const RdKafka::ErrorCode ec = producer_->produce( + topic, partition, flags, value_data, value.size(), key.data(), key.size(), 0, nullptr); + if (RdKafka::ERR_NO_ERROR == ec) { + // We have succeeded with submitting data to producer, so we register a callback. + unfinished_produce_requests_.push_back(origin); + } else { + // We could not submit data to producer. + // Let's treat that as a normal failure (Envoy is a broker after all) and propagate + // downstream. + ENVOY_LOG(trace, "Produce failure: {}, while sending to [{}/{}]", ec, topic, partition); + const DeliveryMemento memento = {value_data, ec, 0}; + origin->accept(memento); + } + } +} + +void RichKafkaProducer::checkDeliveryReports() { + while (poller_thread_active_) { + // We are going to wait for 1000ms, returning when an event (message delivery) happens or + // producer is closed. Unfortunately we do not have any ability to interrupt this call, so every + // destructor is going to take up to this much time. + producer_->poll(1000); + // This invokes the callback below, if any delivery finished (successful or not). + } + ENVOY_LOG(debug, "Poller thread finished"); +} + +// Kafka callback that contains the delivery information. +void RichKafkaProducer::dr_cb(RdKafka::Message& message) { + ENVOY_LOG(trace, "Delivery finished: {}, payload has been saved at offset {} in {}/{}", + message.err(), message.topic_name(), message.partition(), message.offset()); + const DeliveryMemento memento = {message.payload(), message.err(), message.offset()}; + // Because this method gets executed in poller thread, we need to pass the data through + // dispatcher. + const Event::PostCb callback = [this, memento]() -> void { processDelivery(memento); }; + dispatcher_.post(callback); +} + +// We got the delivery data. 
+// Now we just check all unfinished requests, find the one that originated this particular delivery, +// and notify it. +void RichKafkaProducer::processDelivery(const DeliveryMemento& memento) { + for (auto it = unfinished_produce_requests_.begin(); it != unfinished_produce_requests_.end();) { + bool accepted = (*it)->accept(memento); + if (accepted) { + unfinished_produce_requests_.erase(it); + break; // This is important - a single request can be mapped into multiple callbacks here. + } else { + ++it; + } + } +} + +std::list& RichKafkaProducer::getUnfinishedRequestsForTest() { + return unfinished_produce_requests_; +} + +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/kafka/filters/network/source/mesh/upstream_kafka_client_impl.h b/contrib/kafka/filters/network/source/mesh/upstream_kafka_client_impl.h new file mode 100644 index 0000000000000..82b3e549c2be3 --- /dev/null +++ b/contrib/kafka/filters/network/source/mesh/upstream_kafka_client_impl.h @@ -0,0 +1,104 @@ +#pragma once + +#include + +#include "envoy/event/dispatcher.h" + +#include "contrib/kafka/filters/network/source/mesh/upstream_kafka_client.h" +#include "librdkafka/rdkafkacpp.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { + +/** + * Helper class responsible for creating librdkafka entities, so we can have mocks in tests. 
+ */ +class LibRdKafkaUtils { +public: + virtual ~LibRdKafkaUtils() = default; + + virtual RdKafka::Conf::ConfResult setConfProperty(RdKafka::Conf& conf, const std::string& name, + const std::string& value, + std::string& errstr) const PURE; + + virtual RdKafka::Conf::ConfResult setConfDeliveryCallback(RdKafka::Conf& conf, + RdKafka::DeliveryReportCb* dr_cb, + std::string& errstr) const PURE; + + virtual std::unique_ptr createProducer(RdKafka::Conf* conf, + std::string& errstr) const PURE; +}; + +using RawKafkaProducerConfig = std::map; + +/** + * Combines the librdkafka producer and its dedicated monitoring thread. + * Producer is used to schedule messages to be sent to Kafka. + * Independently running monitoring thread picks up delivery confirmations from producer and uses + * Dispatcher to notify itself about delivery in worker thread. + */ +class RichKafkaProducer : public KafkaProducer, + public RdKafka::DeliveryReportCb, + private Logger::Loggable { +public: + // Main constructor. + RichKafkaProducer(Event::Dispatcher& dispatcher, Thread::ThreadFactory& thread_factory, + const RawKafkaProducerConfig& configuration); + + // Visible for testing (allows injection of LibRdKafkaUtils). + RichKafkaProducer(Event::Dispatcher& dispatcher, Thread::ThreadFactory& thread_factory, + const RawKafkaProducerConfig& configuration, const LibRdKafkaUtils& utils); + + // More complex than usual. + // Marks that monitoring thread should finish and waits for it to join. + ~RichKafkaProducer() override; + + // KafkaProducer + void markFinished() override; + + // KafkaProducer + void send(const ProduceFinishCbSharedPtr origin, const std::string& topic, + const int32_t partition, const absl::string_view key, + const absl::string_view value) override; + + // This method gets executed by monitoring thread. + // Does not finish until this object gets 'markFinished' invoked or gets destroyed. + // Executed in dedicated monitoring thread. 
+ void checkDeliveryReports(); + + // RdKafka::DeliveryReportCb + void dr_cb(RdKafka::Message& message) override; + + // Processes the delivery confirmation. + // Executed in Envoy worker thread. + void processDelivery(const DeliveryMemento& memento); + + std::list& getUnfinishedRequestsForTest(); + +private: + Event::Dispatcher& dispatcher_; + + std::list unfinished_produce_requests_; + + // Real Kafka producer (thread-safe). + // Invoked by Envoy handler thread (to produce), and internal monitoring thread + // (to poll for delivery events). + std::unique_ptr producer_; + + // Flag controlling monitoring threads's execution. + std::atomic poller_thread_active_; + + // Monitoring thread that's responsible for continuously polling for new Kafka producer events. + Thread::ThreadPtr poller_thread_; +}; + +using RichKafkaProducerPtr = std::unique_ptr; + +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/kafka/filters/network/source/mesh/upstream_kafka_facade.cc b/contrib/kafka/filters/network/source/mesh/upstream_kafka_facade.cc new file mode 100644 index 0000000000000..6d096a0c95255 --- /dev/null +++ b/contrib/kafka/filters/network/source/mesh/upstream_kafka_facade.cc @@ -0,0 +1,100 @@ +#include "contrib/kafka/filters/network/source/mesh/upstream_kafka_facade.h" + +#include "contrib/kafka/filters/network/source/mesh/upstream_kafka_client_impl.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { + +/** + * Responsible for keeping a map of upstream-facing Kafka clients. 
+ */ +class ThreadLocalKafkaFacade : public ThreadLocal::ThreadLocalObject, + private Logger::Loggable { +public: + ThreadLocalKafkaFacade(const UpstreamKafkaConfiguration& configuration, + Event::Dispatcher& dispatcher, Thread::ThreadFactory& thread_factory); + ~ThreadLocalKafkaFacade() override; + + KafkaProducer& getProducerForTopic(const std::string& topic); + + size_t getProducerCountForTest() const; + +private: + // Mutates 'cluster_to_kafka_client_'. + KafkaProducer& registerNewProducer(const ClusterConfig& cluster_config); + + const UpstreamKafkaConfiguration& configuration_; + Event::Dispatcher& dispatcher_; + Thread::ThreadFactory& thread_factory_; + + std::map cluster_to_kafka_client_; +}; + +ThreadLocalKafkaFacade::ThreadLocalKafkaFacade(const UpstreamKafkaConfiguration& configuration, + Event::Dispatcher& dispatcher, + Thread::ThreadFactory& thread_factory) + : configuration_{configuration}, dispatcher_{dispatcher}, thread_factory_{thread_factory} {} + +ThreadLocalKafkaFacade::~ThreadLocalKafkaFacade() { + // Because the producers take a moment to shutdown, we mark their monitoring threads as shut down + // before the destructors get called. + for (auto& entry : cluster_to_kafka_client_) { + entry.second->markFinished(); + } +} + +KafkaProducer& ThreadLocalKafkaFacade::getProducerForTopic(const std::string& topic) { + const absl::optional cluster_config = + configuration_.computeClusterConfigForTopic(topic); + if (cluster_config) { + const auto it = cluster_to_kafka_client_.find(cluster_config->name_); + // Return client already present or create new one and register it. + return (cluster_to_kafka_client_.end() == it) ? 
registerNewProducer(*cluster_config) + : *(it->second); + } else { + throw EnvoyException(absl::StrCat("cannot compute target producer for topic: ", topic)); + } +} + +KafkaProducer& ThreadLocalKafkaFacade::registerNewProducer(const ClusterConfig& cluster_config) { + ENVOY_LOG(debug, "Registering new Kafka producer for cluster [{}]", cluster_config.name_); + KafkaProducerPtr new_producer = std::make_unique( + dispatcher_, thread_factory_, cluster_config.upstream_producer_properties_); + auto result = cluster_to_kafka_client_.emplace(cluster_config.name_, std::move(new_producer)); + return *(result.first->second); +} + +size_t ThreadLocalKafkaFacade::getProducerCountForTest() const { + return cluster_to_kafka_client_.size(); +} + +UpstreamKafkaFacadeImpl::UpstreamKafkaFacadeImpl(const UpstreamKafkaConfiguration& configuration, + ThreadLocal::SlotAllocator& slot_allocator, + Thread::ThreadFactory& thread_factory) + : tls_{slot_allocator.allocateSlot()} { + + ThreadLocal::Slot::InitializeCb cb = + [&configuration, + &thread_factory](Event::Dispatcher& dispatcher) -> ThreadLocal::ThreadLocalObjectSharedPtr { + return std::make_shared(configuration, dispatcher, thread_factory); + }; + tls_->set(cb); +} + +// Return KafkaProducer instance that is local to given thread, via ThreadLocalKafkaFacade. 
+KafkaProducer& UpstreamKafkaFacadeImpl::getProducerForTopic(const std::string& topic) { + return tls_->getTyped().getProducerForTopic(topic); +} + +size_t UpstreamKafkaFacadeImpl::getProducerCountForTest() const { + return tls_->getTyped().getProducerCountForTest(); +} + +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/kafka/filters/network/source/mesh/upstream_kafka_facade.h b/contrib/kafka/filters/network/source/mesh/upstream_kafka_facade.h new file mode 100644 index 0000000000000..9cf69aa5f2241 --- /dev/null +++ b/contrib/kafka/filters/network/source/mesh/upstream_kafka_facade.h @@ -0,0 +1,58 @@ +#pragma once + +#include "envoy/thread/thread.h" +#include "envoy/thread_local/thread_local.h" + +#include "source/common/common/logger.h" + +#include "contrib/kafka/filters/network/source/mesh/upstream_config.h" +#include "contrib/kafka/filters/network/source/mesh/upstream_kafka_client.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { + +/** + * Provides access to upstream Kafka clients. + */ +class UpstreamKafkaFacade { +public: + virtual ~UpstreamKafkaFacade() = default; + + /** + * Returns a Kafka producer that points an upstream Kafka cluster that is supposed to receive + * messages for the given topic. + */ + virtual KafkaProducer& getProducerForTopic(const std::string& topic) PURE; +}; + +using UpstreamKafkaFacadeSharedPtr = std::shared_ptr; + +/** + * Provides access to upstream Kafka clients. + * This is done by using thread-local maps of cluster to producer. + * We are going to have one Kafka producer per upstream cluster, per Envoy worker thread. 
+ */ +class UpstreamKafkaFacadeImpl : public UpstreamKafkaFacade, + private Logger::Loggable { +public: + UpstreamKafkaFacadeImpl(const UpstreamKafkaConfiguration& configuration, + ThreadLocal::SlotAllocator& slot_allocator, + Thread::ThreadFactory& thread_factory); + + // UpstreamKafkaFacade + KafkaProducer& getProducerForTopic(const std::string& topic) override; + + size_t getProducerCountForTest() const; + +private: + ThreadLocal::SlotPtr tls_; +}; + +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/kafka/parser.h b/contrib/kafka/filters/network/source/parser.h similarity index 100% rename from source/extensions/filters/network/kafka/parser.h rename to contrib/kafka/filters/network/source/parser.h diff --git a/source/extensions/filters/network/kafka/protocol/complex_type_template.j2 b/contrib/kafka/filters/network/source/protocol/complex_type_template.j2 similarity index 100% rename from source/extensions/filters/network/kafka/protocol/complex_type_template.j2 rename to contrib/kafka/filters/network/source/protocol/complex_type_template.j2 diff --git a/source/extensions/filters/network/kafka/protocol/generator.py b/contrib/kafka/filters/network/source/protocol/generator.py similarity index 98% rename from source/extensions/filters/network/kafka/protocol/generator.py rename to contrib/kafka/filters/network/source/protocol/generator.py index 3ba3ac0a844ff..846dd2aa2d9b7 100755 --- a/source/extensions/filters/network/kafka/protocol/generator.py +++ b/contrib/kafka/filters/network/source/protocol/generator.py @@ -700,8 +700,17 @@ def is_printable(self): class RenderingHelper: """ - Helper for jinja templates. - """ + Utility function that allows us to process names in jinja easier. + """ + + @staticmethod + def camel_case_to_snake_case(str): + import re + return re.sub('(?!^)([A-Z]+)', r'_\1', str) + + """ + Helper for jinja templates. 
+ """ @staticmethod def get_template(template): @@ -713,4 +722,5 @@ def get_template(template): env = jinja2.Environment( loader=jinja2.FileSystemLoader( searchpath=os.path.dirname(os.path.abspath(sys.argv[0])))) + env.filters['camel_case_to_snake_case'] = RenderingHelper.camel_case_to_snake_case return env.get_template(template) diff --git a/source/extensions/filters/network/kafka/protocol/kafka_request_resolver_cc.j2 b/contrib/kafka/filters/network/source/protocol/kafka_request_resolver_cc.j2 similarity index 89% rename from source/extensions/filters/network/kafka/protocol/kafka_request_resolver_cc.j2 rename to contrib/kafka/filters/network/source/protocol/kafka_request_resolver_cc.j2 index 0d2da9aaa7c50..be7b83288020e 100644 --- a/source/extensions/filters/network/kafka/protocol/kafka_request_resolver_cc.j2 +++ b/contrib/kafka/filters/network/source/protocol/kafka_request_resolver_cc.j2 @@ -3,9 +3,9 @@ Defines default Kafka request resolver, that uses request parsers in (also generated) 'requests.h'. 
#} -#include "source/extensions/filters/network/kafka/external/requests.h" -#include "source/extensions/filters/network/kafka/kafka_request_parser.h" -#include "source/extensions/filters/network/kafka/parser.h" +#include "contrib/kafka/filters/network/source/external/requests.h" +#include "contrib/kafka/filters/network/source/kafka_request_parser.h" +#include "contrib/kafka/filters/network/source/parser.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/kafka/protocol/kafka_response_resolver_cc.j2 b/contrib/kafka/filters/network/source/protocol/kafka_response_resolver_cc.j2 similarity index 93% rename from source/extensions/filters/network/kafka/protocol/kafka_response_resolver_cc.j2 rename to contrib/kafka/filters/network/source/protocol/kafka_response_resolver_cc.j2 index 0256e370dca6d..5524b0cd84944 100644 --- a/source/extensions/filters/network/kafka/protocol/kafka_response_resolver_cc.j2 +++ b/contrib/kafka/filters/network/source/protocol/kafka_response_resolver_cc.j2 @@ -3,8 +3,8 @@ Defines default Kafka response resolver, that uses response parsers in (also generated) 'responses.h'. 
#} -#include "source/extensions/filters/network/kafka/external/responses.h" -#include "source/extensions/filters/network/kafka/kafka_response_parser.h" +#include "contrib/kafka/filters/network/source/external/responses.h" +#include "contrib/kafka/filters/network/source/kafka_response_parser.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/kafka/protocol/launcher.py b/contrib/kafka/filters/network/source/protocol/launcher.py similarity index 95% rename from source/extensions/filters/network/kafka/protocol/launcher.py rename to contrib/kafka/filters/network/source/protocol/launcher.py index f8b36570d0375..b8dd4f287f094 100644 --- a/source/extensions/filters/network/kafka/protocol/launcher.py +++ b/contrib/kafka/filters/network/source/protocol/launcher.py @@ -2,7 +2,7 @@ # Launcher for generating Kafka protocol code. -import source.extensions.filters.network.kafka.protocol.generator as generator +import contrib.kafka.filters.network.source.protocol.generator as generator import sys import os diff --git a/source/extensions/filters/network/kafka/protocol/request_metrics_h.j2 b/contrib/kafka/filters/network/source/protocol/request_metrics_h.j2 similarity index 100% rename from source/extensions/filters/network/kafka/protocol/request_metrics_h.j2 rename to contrib/kafka/filters/network/source/protocol/request_metrics_h.j2 diff --git a/source/extensions/filters/network/kafka/protocol/request_parser.j2 b/contrib/kafka/filters/network/source/protocol/request_parser.j2 similarity index 76% rename from source/extensions/filters/network/kafka/protocol/request_parser.j2 rename to contrib/kafka/filters/network/source/protocol/request_parser.j2 index 712f0d4294f23..db536a0e03e14 100644 --- a/source/extensions/filters/network/kafka/protocol/request_parser.j2 +++ b/contrib/kafka/filters/network/source/protocol/request_parser.j2 @@ -8,6 +8,12 @@ (see 'kafka_request_resolver_cc.j2'). 
#} +constexpr int16_t {{ complex_type.name | camel_case_to_snake_case | upper }}_API_KEY = + {{ complex_type.get_extra('api_key') }}; + +constexpr int16_t {{ complex_type.name | camel_case_to_snake_case | upper }}_MAX_VERSION = + {{ complex_type.versions[-1] }}; + {% for version in complex_type.versions %}class {{ complex_type.name }}V{{ version }}Parser: public RequestDataParser< {{ complex_type.name }}, {{ complex_type.name }}V{{ version }}Deserializer> diff --git a/source/extensions/filters/network/kafka/protocol/requests_h.j2 b/contrib/kafka/filters/network/source/protocol/requests_h.j2 similarity index 90% rename from source/extensions/filters/network/kafka/protocol/requests_h.j2 rename to contrib/kafka/filters/network/source/protocol/requests_h.j2 index e6b4fd42976c4..eae7afc9ac996 100644 --- a/source/extensions/filters/network/kafka/protocol/requests_h.j2 +++ b/contrib/kafka/filters/network/source/protocol/requests_h.j2 @@ -22,8 +22,8 @@ (because partition data is present in every FetchRequestTopic version). 
#} #pragma once -#include "source/extensions/filters/network/kafka/kafka_request.h" -#include "source/extensions/filters/network/kafka/kafka_request_parser.h" +#include "contrib/kafka/filters/network/source/kafka_request.h" +#include "contrib/kafka/filters/network/source/kafka_request_parser.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/kafka/protocol/response_metrics_h.j2 b/contrib/kafka/filters/network/source/protocol/response_metrics_h.j2 similarity index 100% rename from source/extensions/filters/network/kafka/protocol/response_metrics_h.j2 rename to contrib/kafka/filters/network/source/protocol/response_metrics_h.j2 diff --git a/source/extensions/filters/network/kafka/protocol/response_parser.j2 b/contrib/kafka/filters/network/source/protocol/response_parser.j2 similarity index 100% rename from source/extensions/filters/network/kafka/protocol/response_parser.j2 rename to contrib/kafka/filters/network/source/protocol/response_parser.j2 diff --git a/source/extensions/filters/network/kafka/protocol/responses_h.j2 b/contrib/kafka/filters/network/source/protocol/responses_h.j2 similarity index 90% rename from source/extensions/filters/network/kafka/protocol/responses_h.j2 rename to contrib/kafka/filters/network/source/protocol/responses_h.j2 index 099e6c014c03b..643c6f70966be 100644 --- a/source/extensions/filters/network/kafka/protocol/responses_h.j2 +++ b/contrib/kafka/filters/network/source/protocol/responses_h.j2 @@ -23,8 +23,8 @@ - AbortedTransaction & its Deserializers (starting with version 4). 
#} #pragma once -#include "source/extensions/filters/network/kafka/kafka_response.h" -#include "source/extensions/filters/network/kafka/kafka_response_parser.h" +#include "contrib/kafka/filters/network/source/kafka_response.h" +#include "contrib/kafka/filters/network/source/kafka_response_parser.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/kafka/request_codec.cc b/contrib/kafka/filters/network/source/request_codec.cc similarity index 93% rename from source/extensions/filters/network/kafka/request_codec.cc rename to contrib/kafka/filters/network/source/request_codec.cc index f389cecad74b9..646cef850e9a7 100644 --- a/source/extensions/filters/network/kafka/request_codec.cc +++ b/contrib/kafka/filters/network/source/request_codec.cc @@ -1,4 +1,4 @@ -#include "source/extensions/filters/network/kafka/request_codec.h" +#include "contrib/kafka/filters/network/source/request_codec.h" #include "source/common/buffer/buffer_impl.h" diff --git a/source/extensions/filters/network/kafka/request_codec.h b/contrib/kafka/filters/network/source/request_codec.h similarity index 91% rename from source/extensions/filters/network/kafka/request_codec.h rename to contrib/kafka/filters/network/source/request_codec.h index 07fa4b2b90e0d..25bcbbfbe1fda 100644 --- a/source/extensions/filters/network/kafka/request_codec.h +++ b/contrib/kafka/filters/network/source/request_codec.h @@ -3,10 +3,10 @@ #include "envoy/buffer/buffer.h" #include "envoy/common/pure.h" -#include "source/extensions/filters/network/kafka/codec.h" -#include "source/extensions/filters/network/kafka/kafka_request.h" -#include "source/extensions/filters/network/kafka/kafka_request_parser.h" -#include "source/extensions/filters/network/kafka/parser.h" +#include "contrib/kafka/filters/network/source/codec.h" +#include "contrib/kafka/filters/network/source/kafka_request.h" +#include "contrib/kafka/filters/network/source/kafka_request_parser.h" +#include 
"contrib/kafka/filters/network/source/parser.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/kafka/requirements.txt b/contrib/kafka/filters/network/source/requirements.txt similarity index 100% rename from source/extensions/filters/network/kafka/requirements.txt rename to contrib/kafka/filters/network/source/requirements.txt diff --git a/source/extensions/filters/network/kafka/response_codec.cc b/contrib/kafka/filters/network/source/response_codec.cc similarity index 95% rename from source/extensions/filters/network/kafka/response_codec.cc rename to contrib/kafka/filters/network/source/response_codec.cc index ccde039bb9514..48400a91a73b5 100644 --- a/source/extensions/filters/network/kafka/response_codec.cc +++ b/contrib/kafka/filters/network/source/response_codec.cc @@ -1,4 +1,4 @@ -#include "source/extensions/filters/network/kafka/response_codec.h" +#include "contrib/kafka/filters/network/source/response_codec.h" #include "source/common/buffer/buffer_impl.h" diff --git a/source/extensions/filters/network/kafka/response_codec.h b/contrib/kafka/filters/network/source/response_codec.h similarity index 96% rename from source/extensions/filters/network/kafka/response_codec.h rename to contrib/kafka/filters/network/source/response_codec.h index 688defdc75b1b..72f900a9ef342 100644 --- a/source/extensions/filters/network/kafka/response_codec.h +++ b/contrib/kafka/filters/network/source/response_codec.h @@ -1,7 +1,7 @@ #pragma once -#include "source/extensions/filters/network/kafka/codec.h" -#include "source/extensions/filters/network/kafka/kafka_response_parser.h" +#include "contrib/kafka/filters/network/source/codec.h" +#include "contrib/kafka/filters/network/source/kafka_response_parser.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/kafka/serialization.cc b/contrib/kafka/filters/network/source/serialization.cc similarity index 99% rename from 
source/extensions/filters/network/kafka/serialization.cc rename to contrib/kafka/filters/network/source/serialization.cc index f34f6b0d652a2..fc8464f7aac0d 100644 --- a/source/extensions/filters/network/kafka/serialization.cc +++ b/contrib/kafka/filters/network/source/serialization.cc @@ -1,4 +1,4 @@ -#include "source/extensions/filters/network/kafka/serialization.h" +#include "contrib/kafka/filters/network/source/serialization.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/kafka/serialization.h b/contrib/kafka/filters/network/source/serialization.h similarity index 99% rename from source/extensions/filters/network/kafka/serialization.h rename to contrib/kafka/filters/network/source/serialization.h index 3abc5f1fb5597..3401199c002f8 100644 --- a/source/extensions/filters/network/kafka/serialization.h +++ b/contrib/kafka/filters/network/source/serialization.h @@ -13,10 +13,10 @@ #include "source/common/common/fmt.h" #include "source/common/common/safe_memcpy.h" #include "source/common/common/utility.h" -#include "source/extensions/filters/network/kafka/kafka_types.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" +#include "contrib/kafka/filters/network/source/kafka_types.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/kafka/serialization/generator.py b/contrib/kafka/filters/network/source/serialization/generator.py similarity index 100% rename from source/extensions/filters/network/kafka/serialization/generator.py rename to contrib/kafka/filters/network/source/serialization/generator.py diff --git a/source/extensions/filters/network/kafka/serialization/launcher.py b/contrib/kafka/filters/network/source/serialization/launcher.py similarity index 91% rename from source/extensions/filters/network/kafka/serialization/launcher.py rename to contrib/kafka/filters/network/source/serialization/launcher.py index 571f448086a9e..bf4b97497a257 100644 --- 
a/source/extensions/filters/network/kafka/serialization/launcher.py +++ b/contrib/kafka/filters/network/source/serialization/launcher.py @@ -2,7 +2,7 @@ # Launcher for generating composite serializer code. -import source.extensions.filters.network.kafka.serialization.generator as generator +import contrib.kafka.filters.network.source.serialization.generator as generator import sys import os diff --git a/source/extensions/filters/network/kafka/serialization/serialization_composite_h.j2 b/contrib/kafka/filters/network/source/serialization/serialization_composite_h.j2 similarity index 96% rename from source/extensions/filters/network/kafka/serialization/serialization_composite_h.j2 rename to contrib/kafka/filters/network/source/serialization/serialization_composite_h.j2 index 0f4e7065c41cb..620aa9855cb2d 100644 --- a/source/extensions/filters/network/kafka/serialization/serialization_composite_h.j2 +++ b/contrib/kafka/filters/network/source/serialization/serialization_composite_h.j2 @@ -19,8 +19,8 @@ #include "source/common/common/byte_order.h" #include "source/common/common/fmt.h" -#include "source/extensions/filters/network/kafka/kafka_types.h" -#include "source/extensions/filters/network/kafka/serialization.h" +#include "contrib/kafka/filters/network/source/kafka_types.h" +#include "contrib/kafka/filters/network/source/serialization.h" #include "absl/strings/string_view.h" diff --git a/source/extensions/filters/network/kafka/tagged_fields.h b/contrib/kafka/filters/network/source/tagged_fields.h similarity index 98% rename from source/extensions/filters/network/kafka/tagged_fields.h rename to contrib/kafka/filters/network/source/tagged_fields.h index 6ace5ce2fccad..f9aebaf7472bf 100644 --- a/source/extensions/filters/network/kafka/tagged_fields.h +++ b/contrib/kafka/filters/network/source/tagged_fields.h @@ -2,7 +2,7 @@ #include -#include "source/extensions/filters/network/kafka/serialization.h" +#include "contrib/kafka/filters/network/source/serialization.h" /** * 
This header file provides serialization support for tagged fields structure added in 2.4. diff --git a/test/extensions/filters/network/kafka/BUILD b/contrib/kafka/filters/network/test/BUILD similarity index 64% rename from test/extensions/filters/network/kafka/BUILD rename to contrib/kafka/filters/network/test/BUILD index 3531b50ba4b30..4c620dcfe43b9 100644 --- a/test/extensions/filters/network/kafka/BUILD +++ b/contrib/kafka/filters/network/test/BUILD @@ -1,26 +1,23 @@ load( "//bazel:envoy_build_system.bzl", + "envoy_cc_test", "envoy_cc_test_library", - "envoy_package", -) -load( - "//test/extensions:extensions_build_system.bzl", - "envoy_extension_cc_test", + "envoy_contrib_package", ) load("@rules_python//python:defs.bzl", "py_binary") load("@kafka_pip3//:requirements.bzl", "requirement") licenses(["notice"]) # Apache 2 -envoy_package() +envoy_contrib_package() envoy_cc_test_library( name = "buffer_based_test_lib", srcs = [], hdrs = ["buffer_based_test.h"], deps = [ + "//contrib/kafka/filters/network/source:serialization_lib", "//source/common/buffer:buffer_lib", - "//source/extensions/filters/network/kafka:serialization_lib", ], ) @@ -29,30 +26,28 @@ envoy_cc_test_library( srcs = ["serialization_utilities.cc"], hdrs = ["serialization_utilities.h"], deps = [ + "//contrib/kafka/filters/network/source:serialization_lib", "//source/common/buffer:buffer_lib", - "//source/extensions/filters/network/kafka:serialization_lib", ], ) -envoy_extension_cc_test( +envoy_cc_test( name = "serialization_test", srcs = ["serialization_test.cc"], - extension_names = ["envoy.filters.network.kafka_broker"], deps = [ ":serialization_utilities_lib", - "//source/extensions/filters/network/kafka:serialization_lib", - "//source/extensions/filters/network/kafka:tagged_fields_lib", + "//contrib/kafka/filters/network/source:serialization_lib", + "//contrib/kafka/filters/network/source:tagged_fields_lib", "//test/mocks/server:server_mocks", ], ) -envoy_extension_cc_test( +envoy_cc_test( name 
= "serialization_composite_test", srcs = ["external/serialization_composite_test.cc"], - extension_names = ["envoy.filters.network.kafka_broker"], deps = [ ":serialization_utilities_lib", - "//source/extensions/filters/network/kafka:serialization_lib", + "//contrib/kafka/filters/network/source:serialization_lib", "//test/mocks/server:server_mocks", ], ) @@ -76,7 +71,7 @@ py_binary( data = glob(["serialization/*.j2"]), main = "serialization/launcher.py", deps = [ - "//source/extensions/filters/network/kafka:serialization_composite_generator_lib", + "//contrib/kafka/filters/network/source:serialization_composite_generator_lib", requirement("Jinja2"), requirement("MarkupSafe"), ], @@ -90,61 +85,56 @@ envoy_cc_test_library( ], hdrs = ["message_utilities.h"], deps = [ - "//source/extensions/filters/network/kafka:kafka_request_parser_lib", - "//source/extensions/filters/network/kafka:kafka_response_parser_lib", + "//contrib/kafka/filters/network/source:kafka_request_parser_lib", + "//contrib/kafka/filters/network/source:kafka_response_parser_lib", ], ) -envoy_extension_cc_test( +envoy_cc_test( name = "kafka_request_parser_test", srcs = ["kafka_request_parser_test.cc"], - extension_names = ["envoy.filters.network.kafka_broker"], deps = [ ":buffer_based_test_lib", ":serialization_utilities_lib", - "//source/extensions/filters/network/kafka:kafka_request_parser_lib", + "//contrib/kafka/filters/network/source:kafka_request_parser_lib", ], ) -envoy_extension_cc_test( +envoy_cc_test( name = "request_codec_unit_test", srcs = ["request_codec_unit_test.cc"], - extension_names = ["envoy.filters.network.kafka_broker"], deps = [ ":buffer_based_test_lib", - "//source/extensions/filters/network/kafka:kafka_request_codec_lib", + "//contrib/kafka/filters/network/source:kafka_request_codec_lib", ], ) -envoy_extension_cc_test( +envoy_cc_test( name = "request_codec_integration_test", srcs = ["request_codec_integration_test.cc"], - extension_names = ["envoy.filters.network.kafka_broker"], 
deps = [ ":buffer_based_test_lib", ":serialization_utilities_lib", - "//source/extensions/filters/network/kafka:kafka_request_codec_lib", + "//contrib/kafka/filters/network/source:kafka_request_codec_lib", ], ) -envoy_extension_cc_test( +envoy_cc_test( name = "request_codec_request_test", srcs = ["external/request_codec_request_test.cc"], - extension_names = ["envoy.filters.network.kafka_broker"], deps = [ ":buffer_based_test_lib", ":serialization_utilities_lib", - "//source/extensions/filters/network/kafka:kafka_request_codec_lib", + "//contrib/kafka/filters/network/source:kafka_request_codec_lib", ], ) -envoy_extension_cc_test( +envoy_cc_test( name = "requests_test", srcs = ["external/requests_test.cc"], - extension_names = ["envoy.filters.network.kafka_broker"], deps = [ ":buffer_based_test_lib", - "//source/extensions/filters/network/kafka:kafka_request_codec_lib", + "//contrib/kafka/filters/network/source:kafka_request_codec_lib", ], ) @@ -170,56 +160,51 @@ genrule( ], ) -envoy_extension_cc_test( +envoy_cc_test( name = "kafka_response_parser_test", srcs = ["kafka_response_parser_test.cc"], - extension_names = ["envoy.filters.network.kafka_broker"], deps = [ ":buffer_based_test_lib", ":serialization_utilities_lib", - "//source/extensions/filters/network/kafka:kafka_response_parser_lib", + "//contrib/kafka/filters/network/source:kafka_response_parser_lib", ], ) -envoy_extension_cc_test( +envoy_cc_test( name = "response_codec_unit_test", srcs = ["response_codec_unit_test.cc"], - extension_names = ["envoy.filters.network.kafka_broker"], deps = [ ":buffer_based_test_lib", - "//source/extensions/filters/network/kafka:kafka_response_codec_lib", + "//contrib/kafka/filters/network/source:kafka_response_codec_lib", ], ) -envoy_extension_cc_test( +envoy_cc_test( name = "response_codec_integration_test", srcs = ["response_codec_integration_test.cc"], - extension_names = ["envoy.filters.network.kafka_broker"], deps = [ ":buffer_based_test_lib", 
":serialization_utilities_lib", - "//source/extensions/filters/network/kafka:kafka_response_codec_lib", + "//contrib/kafka/filters/network/source:kafka_response_codec_lib", ], ) -envoy_extension_cc_test( +envoy_cc_test( name = "response_codec_response_test", srcs = ["external/response_codec_response_test.cc"], - extension_names = ["envoy.filters.network.kafka_broker"], deps = [ ":buffer_based_test_lib", ":serialization_utilities_lib", - "//source/extensions/filters/network/kafka:kafka_response_codec_lib", + "//contrib/kafka/filters/network/source:kafka_response_codec_lib", ], ) -envoy_extension_cc_test( +envoy_cc_test( name = "responses_test", srcs = ["external/responses_test.cc"], - extension_names = ["envoy.filters.network.kafka_broker"], deps = [ ":buffer_based_test_lib", - "//source/extensions/filters/network/kafka:kafka_response_codec_lib", + "//contrib/kafka/filters/network/source:kafka_response_codec_lib", ], ) @@ -251,19 +236,18 @@ py_binary( data = glob(["protocol/*.j2"]), main = "protocol/launcher.py", deps = [ - "//source/extensions/filters/network/kafka:kafka_protocol_generator_lib", + "//contrib/kafka/filters/network/source:kafka_protocol_generator_lib", requirement("Jinja2"), requirement("MarkupSafe"), ], ) -envoy_extension_cc_test( +envoy_cc_test( name = "metrics_integration_test", srcs = ["metrics_integration_test.cc"], - extension_names = ["envoy.filters.network.kafka_broker"], deps = [ ":message_utilities", - "//source/extensions/filters/network/kafka:kafka_broker_filter_lib", + "//contrib/kafka/filters/network/source:kafka_broker_filter_lib", "//test/common/stats:stat_test_utility_lib", ], ) diff --git a/contrib/kafka/filters/network/test/broker/BUILD b/contrib/kafka/filters/network/test/broker/BUILD new file mode 100644 index 0000000000000..1edda5875b4d7 --- /dev/null +++ b/contrib/kafka/filters/network/test/broker/BUILD @@ -0,0 +1,41 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test", + "envoy_contrib_package", +) + 
+licenses(["notice"]) # Apache 2 + +envoy_contrib_package() + +envoy_cc_test( + name = "config_unit_test", + srcs = ["config_unit_test.cc"], + deps = [ + "//contrib/kafka/filters/network/source:kafka_broker_config_lib", + "//test/mocks/server:factory_context_mocks", + ], +) + +envoy_cc_test( + name = "filter_unit_test", + srcs = ["filter_unit_test.cc"], + deps = [ + "//contrib/kafka/filters/network/source:kafka_broker_filter_lib", + "//envoy/event:timer_interface", + "//test/mocks/network:network_mocks", + "//test/mocks/stats:stats_mocks", + ], +) + +envoy_cc_test( + name = "filter_protocol_test", + srcs = ["filter_protocol_test.cc"], + deps = [ + "//contrib/kafka/filters/network/source:kafka_broker_filter_lib", + "//contrib/kafka/filters/network/test:buffer_based_test_lib", + "//contrib/kafka/filters/network/test:message_utilities", + "//test/common/stats:stat_test_utility_lib", + "//test/test_common:test_time_lib", + ], +) diff --git a/test/extensions/filters/network/kafka/broker/config_unit_test.cc b/contrib/kafka/filters/network/test/broker/config_unit_test.cc similarity index 94% rename from test/extensions/filters/network/kafka/broker/config_unit_test.cc rename to contrib/kafka/filters/network/test/broker/config_unit_test.cc index 4d447883174e7..4ec484e5ca989 100644 --- a/test/extensions/filters/network/kafka/broker/config_unit_test.cc +++ b/contrib/kafka/filters/network/test/broker/config_unit_test.cc @@ -1,7 +1,6 @@ -#include "source/extensions/filters/network/kafka/broker/config.h" - #include "test/mocks/server/factory_context.h" +#include "contrib/kafka/filters/network/source/broker/config.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/kafka/broker/filter_protocol_test.cc b/contrib/kafka/filters/network/test/broker/filter_protocol_test.cc similarity index 94% rename from test/extensions/filters/network/kafka/broker/filter_protocol_test.cc rename to 
contrib/kafka/filters/network/test/broker/filter_protocol_test.cc index 17ef7a3f084a2..8d790b14806eb 100644 --- a/test/extensions/filters/network/kafka/broker/filter_protocol_test.cc +++ b/contrib/kafka/filters/network/test/broker/filter_protocol_test.cc @@ -5,15 +5,15 @@ #include "source/common/common/utility.h" #include "source/common/stats/isolated_store_impl.h" -#include "source/extensions/filters/network/kafka/broker/filter.h" -#include "source/extensions/filters/network/kafka/external/requests.h" -#include "source/extensions/filters/network/kafka/external/responses.h" #include "test/common/stats/stat_test_utility.h" -#include "test/extensions/filters/network/kafka/buffer_based_test.h" -#include "test/extensions/filters/network/kafka/message_utilities.h" #include "test/test_common/test_time.h" +#include "contrib/kafka/filters/network/source/broker/filter.h" +#include "contrib/kafka/filters/network/source/external/requests.h" +#include "contrib/kafka/filters/network/source/external/responses.h" +#include "contrib/kafka/filters/network/test/buffer_based_test.h" +#include "contrib/kafka/filters/network/test/message_utilities.h" #include "gtest/gtest.h" namespace Envoy { diff --git a/test/extensions/filters/network/kafka/broker/filter_unit_test.cc b/contrib/kafka/filters/network/test/broker/filter_unit_test.cc similarity index 98% rename from test/extensions/filters/network/kafka/broker/filter_unit_test.cc rename to contrib/kafka/filters/network/test/broker/filter_unit_test.cc index d62ca8a68700a..a91316250db89 100644 --- a/test/extensions/filters/network/kafka/broker/filter_unit_test.cc +++ b/contrib/kafka/filters/network/test/broker/filter_unit_test.cc @@ -1,11 +1,10 @@ #include "envoy/event/timer.h" -#include "source/extensions/filters/network/kafka/broker/filter.h" -#include "source/extensions/filters/network/kafka/external/requests.h" - #include "test/mocks/network/mocks.h" #include "test/mocks/stats/mocks.h" +#include 
"contrib/kafka/filters/network/source/broker/filter.h" +#include "contrib/kafka/filters/network/source/external/requests.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/kafka/broker/integration_test/BUILD b/contrib/kafka/filters/network/test/broker/integration_test/BUILD similarity index 66% rename from test/extensions/filters/network/kafka/broker/integration_test/BUILD rename to contrib/kafka/filters/network/test/broker/integration_test/BUILD index b3237a2188876..080c2a21a3805 100644 --- a/test/extensions/filters/network/kafka/broker/integration_test/BUILD +++ b/contrib/kafka/filters/network/test/broker/integration_test/BUILD @@ -1,33 +1,29 @@ load( "//bazel:envoy_build_system.bzl", - "envoy_package", -) -load( - "//test/extensions:extensions_build_system.bzl", - "envoy_extension_py_test", + "envoy_contrib_package", + "envoy_py_test", ) load("@kafka_pip3//:requirements.bzl", "requirement") licenses(["notice"]) # Apache 2 -envoy_package() +envoy_contrib_package() -envoy_extension_py_test( +# This test sets up multiple services, and this can take variable amount of time (30-60 seconds). 
+envoy_py_test( name = "kafka_broker_integration_test", srcs = [ "kafka_broker_integration_test.py", "@kafka_python_client//:all", ], data = [ - "//source/exe:envoy-static", + "//contrib/exe:envoy-static", "//bazel:remote_jdk11", "@kafka_server_binary//:all", ] + glob(["*.j2"]), - extension_names = ["envoy.filters.network.kafka_broker"], flaky = True, python_version = "PY3", srcs_version = "PY3", - tags = ["manual"], deps = [ requirement("Jinja2"), requirement("MarkupSafe"), diff --git a/test/extensions/filters/network/kafka/broker/integration_test/README.md b/contrib/kafka/filters/network/test/broker/integration_test/README.md similarity index 89% rename from test/extensions/filters/network/kafka/broker/integration_test/README.md rename to contrib/kafka/filters/network/test/broker/integration_test/README.md index c0356f486ad77..ba8377cfdb835 100644 --- a/test/extensions/filters/network/kafka/broker/integration_test/README.md +++ b/contrib/kafka/filters/network/test/broker/integration_test/README.md @@ -19,6 +19,6 @@ The tests verify if: ``` bazel test \ - //test/extensions/filters/network/kafka/broker/integration_test:kafka_broker_integration_test \ + //contrib/kafka/filters/network/test/broker/integration_test:kafka_broker_integration_test \ --runs_per_test 1000 ``` diff --git a/test/extensions/filters/network/kafka/broker/integration_test/envoy_config_yaml.j2 b/contrib/kafka/filters/network/test/broker/integration_test/envoy_config_yaml.j2 similarity index 97% rename from test/extensions/filters/network/kafka/broker/integration_test/envoy_config_yaml.j2 rename to contrib/kafka/filters/network/test/broker/integration_test/envoy_config_yaml.j2 index 085413792af77..af945c5c61d7b 100644 --- a/test/extensions/filters/network/kafka/broker/integration_test/envoy_config_yaml.j2 +++ b/contrib/kafka/filters/network/test/broker/integration_test/envoy_config_yaml.j2 @@ -28,7 +28,6 @@ static_resources: address: 127.0.0.1 port_value: {{ data['kafka_real_port'] }} admin: - 
access_log_path: /dev/null profile_path: /dev/null address: socket_address: { address: 127.0.0.1, port_value: {{ data['envoy_monitoring_port'] }} } diff --git a/test/extensions/filters/network/kafka/broker/integration_test/kafka_broker_integration_test.py b/contrib/kafka/filters/network/test/broker/integration_test/kafka_broker_integration_test.py similarity index 98% rename from test/extensions/filters/network/kafka/broker/integration_test/kafka_broker_integration_test.py rename to contrib/kafka/filters/network/test/broker/integration_test/kafka_broker_integration_test.py index e44e6d1e4c0bd..01a02d62e1531 100644 --- a/test/extensions/filters/network/kafka/broker/integration_test/kafka_broker_integration_test.py +++ b/contrib/kafka/filters/network/test/broker/integration_test/kafka_broker_integration_test.py @@ -441,15 +441,15 @@ def find_java(): @staticmethod def find_envoy(): """ - This method locates envoy binary. - It's present at ./source/exe/envoy-static (at least for mac/bazel-asan/bazel-tsan), - or at ./external/envoy/source/exe/envoy-static (for bazel-compile_time_options). - """ + This method locates envoy binary. + It's present at ./source/exe/envoy-static (at least for mac/bazel-asan/bazel-tsan), + or at ./external/envoy/source/exe/envoy-static (for bazel-compile_time_options). 
+ """ - candidate = os.path.join('.', 'source', 'exe', 'envoy-static') + candidate = os.path.join('.', 'contrib', 'exe', 'envoy-static') if os.path.isfile(candidate): return candidate - candidate = os.path.join('.', 'external', 'envoy', 'source', 'exe', 'envoy-static') + candidate = os.path.join('.', 'external', 'envoy', 'contrib', 'exe', 'envoy-static') if os.path.isfile(candidate): return candidate raise Exception("Could not find Envoy") diff --git a/test/extensions/filters/network/kafka/broker/integration_test/kafka_server_properties.j2 b/contrib/kafka/filters/network/test/broker/integration_test/kafka_server_properties.j2 similarity index 100% rename from test/extensions/filters/network/kafka/broker/integration_test/kafka_server_properties.j2 rename to contrib/kafka/filters/network/test/broker/integration_test/kafka_server_properties.j2 diff --git a/test/extensions/filters/network/kafka/broker/integration_test/zookeeper_properties.j2 b/contrib/kafka/filters/network/test/broker/integration_test/zookeeper_properties.j2 similarity index 100% rename from test/extensions/filters/network/kafka/broker/integration_test/zookeeper_properties.j2 rename to contrib/kafka/filters/network/test/broker/integration_test/zookeeper_properties.j2 diff --git a/test/extensions/filters/network/kafka/buffer_based_test.h b/contrib/kafka/filters/network/test/buffer_based_test.h similarity index 95% rename from test/extensions/filters/network/kafka/buffer_based_test.h rename to contrib/kafka/filters/network/test/buffer_based_test.h index a762fb3a9ec74..6a6ffff3b5392 100644 --- a/test/extensions/filters/network/kafka/buffer_based_test.h +++ b/contrib/kafka/filters/network/test/buffer_based_test.h @@ -1,10 +1,10 @@ #pragma once #include "source/common/buffer/buffer_impl.h" -#include "source/extensions/filters/network/kafka/serialization.h" #include "absl/container/fixed_array.h" #include "absl/strings/string_view.h" +#include "contrib/kafka/filters/network/source/serialization.h" namespace 
Envoy { namespace Extensions { diff --git a/test/extensions/filters/network/kafka/kafka_request_parser_test.cc b/contrib/kafka/filters/network/test/kafka_request_parser_test.cc similarity index 96% rename from test/extensions/filters/network/kafka/kafka_request_parser_test.cc rename to contrib/kafka/filters/network/test/kafka_request_parser_test.cc index deeb6309bea91..c2a0436cadf3d 100644 --- a/test/extensions/filters/network/kafka/kafka_request_parser_test.cc +++ b/contrib/kafka/filters/network/test/kafka_request_parser_test.cc @@ -1,8 +1,6 @@ -#include "source/extensions/filters/network/kafka/kafka_request_parser.h" - -#include "test/extensions/filters/network/kafka/buffer_based_test.h" -#include "test/extensions/filters/network/kafka/serialization_utilities.h" - +#include "contrib/kafka/filters/network/source/kafka_request_parser.h" +#include "contrib/kafka/filters/network/test/buffer_based_test.h" +#include "contrib/kafka/filters/network/test/serialization_utilities.h" #include "gmock/gmock.h" using testing::_; diff --git a/test/extensions/filters/network/kafka/kafka_response_parser_test.cc b/contrib/kafka/filters/network/test/kafka_response_parser_test.cc similarity index 96% rename from test/extensions/filters/network/kafka/kafka_response_parser_test.cc rename to contrib/kafka/filters/network/test/kafka_response_parser_test.cc index 17615be344ff8..2985905d80039 100644 --- a/test/extensions/filters/network/kafka/kafka_response_parser_test.cc +++ b/contrib/kafka/filters/network/test/kafka_response_parser_test.cc @@ -1,8 +1,6 @@ -#include "source/extensions/filters/network/kafka/kafka_response_parser.h" - -#include "test/extensions/filters/network/kafka/buffer_based_test.h" -#include "test/extensions/filters/network/kafka/serialization_utilities.h" - +#include "contrib/kafka/filters/network/source/kafka_response_parser.h" +#include "contrib/kafka/filters/network/test/buffer_based_test.h" +#include "contrib/kafka/filters/network/test/serialization_utilities.h" 
#include "gmock/gmock.h" using testing::_; diff --git a/contrib/kafka/filters/network/test/mesh/BUILD b/contrib/kafka/filters/network/test/mesh/BUILD new file mode 100644 index 0000000000000..acff686d9e163 --- /dev/null +++ b/contrib/kafka/filters/network/test/mesh/BUILD @@ -0,0 +1,75 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test", + "envoy_cc_test_library", + "envoy_contrib_package", +) +load( + "//bazel:envoy_internal.bzl", + "envoy_external_dep_path", +) + +licenses(["notice"]) # Apache 2 + +envoy_contrib_package() + +envoy_cc_test( + name = "filter_unit_test", + srcs = ["filter_unit_test.cc"], + tags = ["skip_on_windows"], + deps = [ + "//contrib/kafka/filters/network/source/mesh:filter_lib", + "//test/mocks/network:network_mocks", + ], +) + +envoy_cc_test( + name = "request_processor_unit_test", + srcs = ["request_processor_unit_test.cc"], + tags = ["skip_on_windows"], + deps = [ + "//contrib/kafka/filters/network/source/mesh:request_processor_lib", + ], +) + +envoy_cc_test( + name = "abstract_command_unit_test", + srcs = ["abstract_command_unit_test.cc"], + tags = ["skip_on_windows"], + deps = [ + "//contrib/kafka/filters/network/source/mesh:abstract_command_lib", + ], +) + +envoy_cc_test( + name = "upstream_kafka_facade_unit_test", + srcs = ["upstream_kafka_facade_unit_test.cc"], + tags = ["skip_on_windows"], + deps = [ + "//contrib/kafka/filters/network/source/mesh:upstream_kafka_facade_lib", + "//test/mocks/thread_local:thread_local_mocks", + "//test/test_common:thread_factory_for_test_lib", + ], +) + +envoy_cc_test( + name = "upstream_kafka_client_impl_unit_test", + srcs = ["upstream_kafka_client_impl_unit_test.cc"], + tags = ["skip_on_windows"], + deps = [ + ":kafka_mocks_lib", + "//contrib/kafka/filters/network/source/mesh:upstream_kafka_client_impl_lib", + "//test/mocks/event:event_mocks", + "//test/test_common:thread_factory_for_test_lib", + ], +) + +envoy_cc_test_library( + name = "kafka_mocks_lib", + srcs = [], + hdrs = 
["kafka_mocks.h"], + tags = ["skip_on_windows"], + deps = [ + envoy_external_dep_path("librdkafka"), + ], +) diff --git a/test/extensions/filters/network/kafka/mesh/abstract_command_unit_test.cc b/contrib/kafka/filters/network/test/mesh/abstract_command_unit_test.cc similarity index 95% rename from test/extensions/filters/network/kafka/mesh/abstract_command_unit_test.cc rename to contrib/kafka/filters/network/test/mesh/abstract_command_unit_test.cc index e0ff2202f0971..48661edf9751c 100644 --- a/test/extensions/filters/network/kafka/mesh/abstract_command_unit_test.cc +++ b/contrib/kafka/filters/network/test/mesh/abstract_command_unit_test.cc @@ -1,5 +1,4 @@ -#include "source/extensions/filters/network/kafka/mesh/abstract_command.h" - +#include "contrib/kafka/filters/network/source/mesh/abstract_command.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/contrib/kafka/filters/network/test/mesh/command_handlers/BUILD b/contrib/kafka/filters/network/test/mesh/command_handlers/BUILD new file mode 100644 index 0000000000000..18b75f4206f50 --- /dev/null +++ b/contrib/kafka/filters/network/test/mesh/command_handlers/BUILD @@ -0,0 +1,51 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test", + "envoy_contrib_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_contrib_package() + +envoy_cc_test( + name = "produce_unit_test", + srcs = ["produce_unit_test.cc"], + tags = ["skip_on_windows"], + deps = [ + "//contrib/kafka/filters/network/source/mesh/command_handlers:produce_lib", + "//test/mocks/network:network_mocks", + "//test/mocks/stats:stats_mocks", + ], +) + +envoy_cc_test( + name = "produce_record_extractor_unit_test", + srcs = ["produce_record_extractor_unit_test.cc"], + tags = ["skip_on_windows"], + deps = [ + "//contrib/kafka/filters/network/source/mesh/command_handlers:produce_record_extractor_lib", + ], +) + +envoy_cc_test( + name = "metadata_unit_test", + srcs = ["metadata_unit_test.cc"], + tags = ["skip_on_windows"], + deps = [ + 
"//contrib/kafka/filters/network/source/mesh/command_handlers:metadata_lib", + "//test/mocks/network:network_mocks", + "//test/mocks/stats:stats_mocks", + ], +) + +envoy_cc_test( + name = "api_versions_unit_test", + srcs = ["api_versions_unit_test.cc"], + tags = ["skip_on_windows"], + deps = [ + "//contrib/kafka/filters/network/source/mesh/command_handlers:api_versions_lib", + "//test/mocks/network:network_mocks", + "//test/mocks/stats:stats_mocks", + ], +) diff --git a/contrib/kafka/filters/network/test/mesh/command_handlers/api_versions_unit_test.cc b/contrib/kafka/filters/network/test/mesh/command_handlers/api_versions_unit_test.cc new file mode 100644 index 0000000000000..2a572bec507ba --- /dev/null +++ b/contrib/kafka/filters/network/test/mesh/command_handlers/api_versions_unit_test.cc @@ -0,0 +1,44 @@ +#include "contrib/kafka/filters/network/source/mesh/command_handlers/api_versions.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { +namespace { + +class MockAbstractRequestListener : public AbstractRequestListener { +public: + MOCK_METHOD(void, onRequest, (InFlightRequestSharedPtr)); + MOCK_METHOD(void, onRequestReadyForAnswer, ()); +}; + +TEST(ApiVersionsTest, shouldBeAlwaysReadyForAnswer) { + // given + MockAbstractRequestListener filter; + EXPECT_CALL(filter, onRequestReadyForAnswer()); + const RequestHeader header = {API_VERSIONS_REQUEST_API_KEY, 0, 0, absl::nullopt}; + ApiVersionsRequestHolder testee = {filter, header}; + + // when, then - invoking should immediately notify the filter. + testee.startProcessing(); + + // when, then - should always be considered finished. + const bool finished = testee.finished(); + EXPECT_TRUE(finished); + + // when, then - the computed result is always contains correct data (confirmed by integration + // tests). 
+ const auto answer = testee.computeAnswer(); + EXPECT_EQ(answer->metadata_.api_key_, header.api_key_); + EXPECT_EQ(answer->metadata_.correlation_id_, header.correlation_id_); +} + +} // namespace +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/kafka/filters/network/test/mesh/command_handlers/metadata_unit_test.cc b/contrib/kafka/filters/network/test/mesh/command_handlers/metadata_unit_test.cc new file mode 100644 index 0000000000000..d9ffda89635c8 --- /dev/null +++ b/contrib/kafka/filters/network/test/mesh/command_handlers/metadata_unit_test.cc @@ -0,0 +1,72 @@ +#include "contrib/kafka/filters/network/source/external/responses.h" +#include "contrib/kafka/filters/network/source/mesh/command_handlers/metadata.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::Return; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { +namespace { + +class MockAbstractRequestListener : public AbstractRequestListener { +public: + MOCK_METHOD(void, onRequest, (InFlightRequestSharedPtr)); + MOCK_METHOD(void, onRequestReadyForAnswer, ()); +}; + +class MockUpstreamKafkaConfiguration : public UpstreamKafkaConfiguration { +public: + MOCK_METHOD(absl::optional, computeClusterConfigForTopic, (const std::string&), + (const)); + MOCK_METHOD((std::pair), getAdvertisedAddress, (), (const)); +}; + +TEST(MetadataTest, shouldBeAlwaysReadyForAnswer) { + // given + MockAbstractRequestListener filter; + EXPECT_CALL(filter, onRequestReadyForAnswer()); + MockUpstreamKafkaConfiguration configuration; + const std::pair advertised_address = {"host", 1234}; + EXPECT_CALL(configuration, getAdvertisedAddress()).WillOnce(Return(advertised_address)); + // First topic is going to have configuration present (42 partitions for each topic). 
+ const ClusterConfig topic1config = {"", 42, {}}; + EXPECT_CALL(configuration, computeClusterConfigForTopic("topic1")) + .WillOnce(Return(absl::make_optional(topic1config))); + // Second topic is not going to have configuration present. + EXPECT_CALL(configuration, computeClusterConfigForTopic("topic2")) + .WillOnce(Return(absl::nullopt)); + const RequestHeader header = {0, 0, 0, absl::nullopt}; + const MetadataRequest data = {{MetadataRequestTopic{"topic1"}, MetadataRequestTopic{"topic2"}}}; + const auto message = std::make_shared>(header, data); + MetadataRequestHolder testee = {filter, configuration, message}; + + // when, then - invoking should immediately notify the filter. + testee.startProcessing(); + + // when, then - should always be considered finished. + const bool finished = testee.finished(); + EXPECT_TRUE(finished); + + // when, then - the computed result is always contains correct data (confirmed by integration + // tests). + const auto answer = testee.computeAnswer(); + EXPECT_EQ(answer->metadata_.api_key_, header.api_key_); + EXPECT_EQ(answer->metadata_.correlation_id_, header.correlation_id_); + + const auto response = std::dynamic_pointer_cast>(answer); + ASSERT_TRUE(response); + const auto topics = response->data_.topics_; + EXPECT_EQ(topics.size(), 1); + EXPECT_EQ(topics[0].partitions_.size(), 42); +} + +} // namespace +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/kafka/filters/network/test/mesh/command_handlers/produce_record_extractor_unit_test.cc b/contrib/kafka/filters/network/test/mesh/command_handlers/produce_record_extractor_unit_test.cc new file mode 100644 index 0000000000000..068aa40c1334b --- /dev/null +++ b/contrib/kafka/filters/network/test/mesh/command_handlers/produce_record_extractor_unit_test.cc @@ -0,0 +1,243 @@ +#include + +#include "test/test_common/utility.h" + +#include 
"contrib/kafka/filters/network/source/external/requests.h" +#include "contrib/kafka/filters/network/source/mesh/command_handlers/produce_record_extractor.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { +namespace { + +// Simple matcher that verifies that the input given is a collection containing correct number of +// unique (!) records for given topic-partition pairs. +MATCHER_P3(HasRecords, topic, partition, expected, "") { + size_t expected_count = expected; + std::set saved_key_pointers = {}; + std::set saved_value_pointers = {}; + size_t count = 0; + + for (const auto& record : arg) { + if (record.topic_ == topic && record.partition_ == partition) { + saved_key_pointers.insert(record.key_); + saved_value_pointers.insert(record.value_); + ++count; + } + } + + if (expected_count != count) { + return false; + } + if (expected_count != saved_key_pointers.size()) { + return false; + } + return saved_key_pointers.size() == saved_value_pointers.size(); +} + +// Helper function to create a record batch that contains a single record with 5-byte key and 5-byte +// value. +Bytes makeGoodRecordBatch() { + // Record batch bytes get ignored (apart from magic field), so we can put 0 there. + Bytes result = Bytes(16 + 1 + 44); + result[16] = 2; // Record batch magic value. 
+ Bytes real_data = {/* Length = 36 */ 72, + /* Attributes */ 0, + /* Timestamp delta */ 0, + /* Offset delta */ 0, + /* Key length = 5 */ 10, + 107, + 107, + 107, + 107, + 107, + /* Value length = 5 */ 10, + 118, + 118, + 118, + 118, + 118, + /* Headers count = 2 */ 4, + /* Header key length = 3 */ 6, + 49, + 49, + 49, + /* Header value length = 5 */ 10, + 97, + 97, + 97, + 97, + 97, + /* Header key length = 3 */ 6, + 50, + 50, + 50, + /* Header value length = 5 */ 10, + 98, + 98, + 98, + 98, + 98}; + result.insert(result.end(), real_data.begin(), real_data.end()); + return result; +} + +TEST(RecordExtractorImpl, shouldProcessRecordBytes) { + // given + const RecordExtractorImpl testee; + + const PartitionProduceData t1_ppd1 = {0, makeGoodRecordBatch()}; + const PartitionProduceData t1_ppd2 = {1, makeGoodRecordBatch()}; + const PartitionProduceData t1_ppd3 = {2, makeGoodRecordBatch()}; + const TopicProduceData tpd1 = {"topic1", {t1_ppd1, t1_ppd2, t1_ppd3}}; + + // Weird input from client, protocol allows sending null value as bytes array. + const PartitionProduceData t2_ppd = {20, absl::nullopt}; + const TopicProduceData tpd2 = {"topic2", {t2_ppd}}; + + const std::vector input = {tpd1, tpd2}; + + // when + const auto result = testee.extractRecords(input); + + // then + EXPECT_THAT(result, HasRecords("topic1", 0, 1)); + EXPECT_THAT(result, HasRecords("topic1", 1, 1)); + EXPECT_THAT(result, HasRecords("topic1", 2, 1)); + EXPECT_THAT(result, HasRecords("topic2", 20, 0)); +} + +/** + * Helper function to make record batch (batch contains 1+ records). + * We use 'stage' parameter to make it a single function with various failure modes. + */ +const std::vector makeTopicProduceData(const unsigned int stage) { + Bytes bytes = makeGoodRecordBatch(); + if (1 == stage) { + // No common fields before magic. + bytes.erase(bytes.begin(), bytes.end()); + } + if (2 == stage) { + // No magic. + bytes.erase(bytes.begin() + 16, bytes.end()); + } + if (3 == stage) { + // Bad magic. 
+ bytes[16] = 42; + } + if (4 == stage) { + // No common fields after magic. + bytes.erase(bytes.begin() + 17, bytes.end()); + } + if (5 == stage) { + // No record length after common fields. + bytes[61] = 128; // This will force variable-length deserializer to wait for more bytes. + bytes.erase(bytes.begin() + 62, bytes.end()); + } + if (6 == stage) { + // Record length is higher than size of real data. + bytes.erase(bytes.begin() + 62, bytes.end()); + } + if (7 == stage) { + // Attributes field has negative length. + bytes[61] = 3; /* -1 */ + bytes.erase(bytes.begin() + 62, bytes.end()); + } + if (8 == stage) { + // Attributes field is missing - length is valid, but there is no more data to read. + bytes[61] = 0; + bytes.erase(bytes.begin() + 62, bytes.end()); + } + if (9 == stage) { + // Header count not present - we are going to drop all 21 header bytes after value. + bytes[61] = (36 - 21) << 1; // Length is encoded as variable length. + bytes.erase(bytes.begin() + 77, bytes.end()); + } + if (10 == stage) { + // Negative variable length integer for header count. + bytes[77] = 17; + } + if (11 == stage) { + // Last header value is going to be shorter, so there will be one unconsumed byte. 
+ bytes[92] = 8; + } + const PartitionProduceData ppd = {0, bytes}; + const TopicProduceData tpd = {"topic", {ppd}}; + return {tpd}; +} + +TEST(RecordExtractorImpl, shouldHandleInvalidRecordBytes) { + const RecordExtractorImpl testee; + EXPECT_THROW_WITH_REGEX(testee.extractRecords(makeTopicProduceData(1)), EnvoyException, + "no common fields"); + EXPECT_THROW_WITH_REGEX(testee.extractRecords(makeTopicProduceData(2)), EnvoyException, + "magic byte is not present"); + EXPECT_THROW_WITH_REGEX(testee.extractRecords(makeTopicProduceData(3)), EnvoyException, + "unknown magic value"); + EXPECT_THROW_WITH_REGEX(testee.extractRecords(makeTopicProduceData(4)), EnvoyException, + "no attribute fields"); + EXPECT_THROW_WITH_REGEX(testee.extractRecords(makeTopicProduceData(5)), EnvoyException, + "no length"); + EXPECT_THROW_WITH_REGEX(testee.extractRecords(makeTopicProduceData(6)), EnvoyException, + "not enough bytes provided"); + EXPECT_THROW_WITH_REGEX(testee.extractRecords(makeTopicProduceData(7)), EnvoyException, + "has invalid length"); + EXPECT_THROW_WITH_REGEX(testee.extractRecords(makeTopicProduceData(8)), EnvoyException, + "attributes not present"); + EXPECT_THROW_WITH_REGEX(testee.extractRecords(makeTopicProduceData(9)), EnvoyException, + "header count not present"); + EXPECT_THROW_WITH_REGEX(testee.extractRecords(makeTopicProduceData(10)), EnvoyException, + "invalid header count"); + EXPECT_THROW_WITH_REGEX(testee.extractRecords(makeTopicProduceData(11)), EnvoyException, + "data left after consuming record"); +} + +// Minor helper function. 
+absl::string_view bytesToStringView(const Bytes& bytes) { + return {reinterpret_cast(bytes.data()), bytes.size()}; +} + +TEST(RecordExtractorImpl, shouldExtractByteArray) { + { + const Bytes noBytes = Bytes(0); + auto arg = bytesToStringView(noBytes); + EXPECT_THROW_WITH_REGEX(RecordExtractorImpl::extractByteArray(arg), EnvoyException, + "byte array length not present"); + } + { + const Bytes nullValueBytes = {0b00000001}; // Length = -1. + auto arg = bytesToStringView(nullValueBytes); + EXPECT_EQ(RecordExtractorImpl::extractByteArray(arg), absl::string_view()); + } + { + const Bytes negativeLengthBytes = {0b01111111}; // Length = -64. + auto arg = bytesToStringView(negativeLengthBytes); + EXPECT_THROW_WITH_REGEX(RecordExtractorImpl::extractByteArray(arg), EnvoyException, + "byte array length less than -1: -64"); + } + { + const Bytes bigLengthBytes = {0b01111110}; // Length = 63. + auto arg = bytesToStringView(bigLengthBytes); + EXPECT_THROW_WITH_REGEX(RecordExtractorImpl::extractByteArray(arg), EnvoyException, + "byte array length larger than data provided: 63 vs 0"); + } + { + // Length = 4, 7 bytes follow, 4 should be consumed, 13s should stay unconsumed. 
+ const Bytes goodBytes = {0b00001000, 42, 42, 42, 42, 13, 13, 13}; + auto arg = bytesToStringView(goodBytes); + EXPECT_EQ(RecordExtractorImpl::extractByteArray(arg), + absl::string_view(reinterpret_cast(goodBytes.data() + 1), 4)); + EXPECT_EQ(arg.data(), reinterpret_cast(goodBytes.data() + 5)); + EXPECT_EQ(arg.size(), 3); + } +} + +} // namespace +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/kafka/filters/network/test/mesh/command_handlers/produce_unit_test.cc b/contrib/kafka/filters/network/test/mesh/command_handlers/produce_unit_test.cc new file mode 100644 index 0000000000000..efa05e82e1f88 --- /dev/null +++ b/contrib/kafka/filters/network/test/mesh/command_handlers/produce_unit_test.cc @@ -0,0 +1,270 @@ +#include + +#include "test/test_common/utility.h" + +#include "contrib/kafka/filters/network/source/external/requests.h" +#include "contrib/kafka/filters/network/source/external/responses.h" +#include "contrib/kafka/filters/network/source/mesh/command_handlers/produce.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::Return; +using testing::ReturnRef; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { +namespace { + +class MockAbstractRequestListener : public AbstractRequestListener { +public: + MOCK_METHOD(void, onRequest, (InFlightRequestSharedPtr)); + MOCK_METHOD(void, onRequestReadyForAnswer, ()); +}; + +class MockRecordExtractor : public RecordExtractor { +public: + MOCK_METHOD((std::vector), extractRecords, (const std::vector&), + (const)); +}; + +class MockUpstreamKafkaFacade : public UpstreamKafkaFacade { +public: + MOCK_METHOD(KafkaProducer&, getProducerForTopic, (const std::string&)); +}; + +class MockKafkaProducer : public KafkaProducer { +public: + MOCK_METHOD(void, send, + (const ProduceFinishCbSharedPtr, const std::string&, const int32_t, + const 
absl::string_view, const absl::string_view)); + MOCK_METHOD(void, markFinished, (), ()); +}; + +class ProduceUnitTest : public testing::Test { +protected: + MockAbstractRequestListener filter_; + MockUpstreamKafkaFacade upstream_kafka_facade_; + MockRecordExtractor extractor_; +}; + +// This is very odd corner case, that should never happen +// (as ProduceRequests with no topics/records make no sense). +TEST_F(ProduceUnitTest, ShouldHandleProduceRequestWithNoRecords) { + // given + MockRecordExtractor extractor; + const std::vector records = {}; + EXPECT_CALL(extractor_, extractRecords(_)).WillOnce(Return(records)); + + const RequestHeader header = {0, 0, 0, absl::nullopt}; + const ProduceRequest data = {0, 0, {}}; + const auto message = std::make_shared>(header, data); + ProduceRequestHolder testee = {filter_, upstream_kafka_facade_, extractor_, message}; + + // when, then - invoking should immediately notify the filter. + EXPECT_CALL(filter_, onRequestReadyForAnswer()); + testee.startProcessing(); + + // when, then - request is finished because there was nothing to do. + const bool finished = testee.finished(); + EXPECT_TRUE(finished); + + // when, then - answer is empty. + const auto answer = testee.computeAnswer(); + EXPECT_EQ(answer->metadata_.api_key_, header.api_key_); + EXPECT_EQ(answer->metadata_.correlation_id_, header.correlation_id_); +} + +// Typical flow without errors. +// The produce request has 2 records, that should be mapped to 2 different Kafka installations. +// The response should contain the values returned by Kafka broker. 
+TEST_F(ProduceUnitTest, ShouldSendRecordsInNormalFlow) { + // given + const OutboundRecord r1 = {"t1", 13, "aaa", "bbb"}; + const OutboundRecord r2 = {"t2", 42, "ccc", "ddd"}; + const std::vector records = {r1, r2}; + EXPECT_CALL(extractor_, extractRecords(_)).WillOnce(Return(records)); + + const RequestHeader header = {0, 0, 0, absl::nullopt}; + const ProduceRequest data = {0, 0, {}}; + const auto message = std::make_shared>(header, data); + std::shared_ptr testee = + std::make_shared(filter_, upstream_kafka_facade_, extractor_, message); + + // when, then - invoking should use producers to send records. + MockKafkaProducer producer1; + EXPECT_CALL(producer1, send(_, r1.topic_, r1.partition_, _, _)); + MockKafkaProducer producer2; + EXPECT_CALL(producer2, send(_, r2.topic_, r2.partition_, _, _)); + EXPECT_CALL(upstream_kafka_facade_, getProducerForTopic(r1.topic_)) + .WillOnce(ReturnRef(producer1)); + EXPECT_CALL(upstream_kafka_facade_, getProducerForTopic(r2.topic_)) + .WillOnce(ReturnRef(producer2)); + testee->startProcessing(); + + // when, then - request is not yet finished (2 records' delivery to be confirmed). + EXPECT_FALSE(testee->finished()); + + // when, then - first record should be delivered. + const DeliveryMemento dm1 = {r1.value_.data(), 0, 123}; + EXPECT_TRUE(testee->accept(dm1)); + EXPECT_FALSE(testee->finished()); + + const DeliveryMemento dm2 = {r2.value_.data(), 0, 234}; + // After all the deliveries have been confirmed, the filter is getting notified. + EXPECT_CALL(filter_, onRequestReadyForAnswer()); + EXPECT_TRUE(testee->accept(dm2)); + EXPECT_TRUE(testee->finished()); + + // when, then - answer gets computed and contains results. 
+ const auto answer = testee->computeAnswer(); + EXPECT_EQ(answer->metadata_.api_key_, header.api_key_); + EXPECT_EQ(answer->metadata_.correlation_id_, header.correlation_id_); + + const auto response = std::dynamic_pointer_cast>(answer); + ASSERT_TRUE(response); + const std::vector responses = response->data_.responses_; + EXPECT_EQ(responses.size(), 2); + EXPECT_EQ(responses[0].partitions_[0].error_code_, dm1.error_code_); + EXPECT_EQ(responses[0].partitions_[0].base_offset_, dm1.offset_); + EXPECT_EQ(responses[1].partitions_[0].error_code_, dm2.error_code_); + EXPECT_EQ(responses[1].partitions_[0].base_offset_, dm2.offset_); +} + +// Typical flow without errors. +// The produce request has 2 records, both pointing to same partition. +// Given that usually we cannot make any guarantees on how Kafka producer is going to append the +// records (as it depends on configuration like max number of records in flight), the first record +// is going to be saved on a bigger offset. +TEST_F(ProduceUnitTest, ShouldMergeOutboundRecordResponses) { + // given + const OutboundRecord r1 = {"t1", 13, "aaa", "bbb"}; + const OutboundRecord r2 = {r1.topic_, r1.partition_, "ccc", "ddd"}; + const std::vector records = {r1, r2}; + EXPECT_CALL(extractor_, extractRecords(_)).WillOnce(Return(records)); + + const RequestHeader header = {0, 0, 0, absl::nullopt}; + const ProduceRequest data = {0, 0, {}}; + const auto message = std::make_shared>(header, data); + std::shared_ptr testee = + std::make_shared(filter_, upstream_kafka_facade_, extractor_, message); + + // when, then - invoking should use producers to send records. + MockKafkaProducer producer; + EXPECT_CALL(producer, send(_, r1.topic_, r1.partition_, _, _)).Times(2); + EXPECT_CALL(upstream_kafka_facade_, getProducerForTopic(r1.topic_)) + .WillRepeatedly(ReturnRef(producer)); + testee->startProcessing(); + + // when, then - request is not yet finished (2 records' delivery to be confirmed). 
+ EXPECT_FALSE(testee->finished()); + + // when, then - first record should be delivered. + const DeliveryMemento dm1 = {r1.value_.data(), 0, 4242}; + EXPECT_TRUE(testee->accept(dm1)); + EXPECT_FALSE(testee->finished()); + + const DeliveryMemento dm2 = {r2.value_.data(), 0, 1313}; + // After all the deliveries have been confirmed, the filter is getting notified. + EXPECT_CALL(filter_, onRequestReadyForAnswer()); + EXPECT_TRUE(testee->accept(dm2)); + EXPECT_TRUE(testee->finished()); + + // when, then - answer gets computed and contains results. + const auto answer = testee->computeAnswer(); + EXPECT_EQ(answer->metadata_.api_key_, header.api_key_); + EXPECT_EQ(answer->metadata_.correlation_id_, header.correlation_id_); + + const auto response = std::dynamic_pointer_cast>(answer); + ASSERT_TRUE(response); + const std::vector responses = response->data_.responses_; + EXPECT_EQ(responses.size(), 1); + EXPECT_EQ(responses[0].partitions_.size(), 1); + EXPECT_EQ(responses[0].partitions_[0].error_code_, 0); + EXPECT_EQ(responses[0].partitions_[0].base_offset_, 1313); +} + +// Flow with errors. +// The produce request has 2 records, both pointing to same partition. +// The first record is going to fail. +// We are going to treat whole delivery as failure. +// Bear in mind second record could get accepted, this is a difference between normal client and +// proxy (this is going to be amended when we manage to send whole record batch). 
+TEST_F(ProduceUnitTest, ShouldHandleDeliveryErrors) { + // given + const OutboundRecord r1 = {"t1", 13, "aaa", "bbb"}; + const OutboundRecord r2 = {r1.topic_, r1.partition_, "ccc", "ddd"}; + const std::vector records = {r1, r2}; + EXPECT_CALL(extractor_, extractRecords(_)).WillOnce(Return(records)); + + const RequestHeader header = {0, 0, 0, absl::nullopt}; + const ProduceRequest data = {0, 0, {}}; + const auto message = std::make_shared>(header, data); + std::shared_ptr testee = + std::make_shared(filter_, upstream_kafka_facade_, extractor_, message); + + // when, then - invoking should use producers to send records. + MockKafkaProducer producer; + EXPECT_CALL(producer, send(_, r1.topic_, r1.partition_, _, _)).Times(2); + EXPECT_CALL(upstream_kafka_facade_, getProducerForTopic(r1.topic_)) + .WillRepeatedly(ReturnRef(producer)); + testee->startProcessing(); + + // when, then - request is not yet finished (2 records' delivery to be confirmed). + EXPECT_FALSE(testee->finished()); + + // when, then - first record fails. + const DeliveryMemento dm1 = {r1.value_.data(), 42, 0}; + EXPECT_TRUE(testee->accept(dm1)); + EXPECT_FALSE(testee->finished()); + + // when, then - second record succeeds (we are going to ignore the result). + const DeliveryMemento dm2 = {r2.value_.data(), 0, 234}; + // After all the deliveries have been confirmed, the filter is getting notified. + EXPECT_CALL(filter_, onRequestReadyForAnswer()); + EXPECT_TRUE(testee->accept(dm2)); + EXPECT_TRUE(testee->finished()); + + // when, then - answer gets computed and contains results. 
+ const auto answer = testee->computeAnswer(); + EXPECT_EQ(answer->metadata_.api_key_, header.api_key_); + EXPECT_EQ(answer->metadata_.correlation_id_, header.correlation_id_); + + const auto response = std::dynamic_pointer_cast>(answer); + ASSERT_TRUE(response); + const std::vector responses = response->data_.responses_; + EXPECT_EQ(responses.size(), 1); + EXPECT_EQ(responses[0].partitions_[0].error_code_, dm1.error_code_); +} + +// As with current version of Kafka library we have no capability of linking producer's notification +// to sent record (other than data address), the owner of this request is going to do a check across +// all owned requests. What means sometimes we might get asked to accept a memento of record that +// did not originate in this request, so it should be ignored. +TEST_F(ProduceUnitTest, ShouldIgnoreMementoFromAnotherRequest) { + // given + const OutboundRecord r1 = {"t1", 13, "aaa", "bbb"}; + const std::vector records = {r1}; + EXPECT_CALL(extractor_, extractRecords(_)).WillOnce(Return(records)); + + const RequestHeader header = {0, 0, 0, absl::nullopt}; + const ProduceRequest data = {0, 0, {}}; + const auto message = std::make_shared>(header, data); + std::shared_ptr testee = + std::make_shared(filter_, upstream_kafka_facade_, extractor_, message); + + // when, then - this record will not match anything. 
+ const DeliveryMemento dm = {nullptr, 0, 42}; + EXPECT_FALSE(testee->accept(dm)); + EXPECT_FALSE(testee->finished()); +} + +} // namespace +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/kafka/mesh/filter_unit_test.cc b/contrib/kafka/filters/network/test/mesh/filter_unit_test.cc similarity index 86% rename from test/extensions/filters/network/kafka/mesh/filter_unit_test.cc rename to contrib/kafka/filters/network/test/mesh/filter_unit_test.cc index 07fffa2466a0e..340859067759e 100644 --- a/test/extensions/filters/network/kafka/mesh/filter_unit_test.cc +++ b/contrib/kafka/filters/network/test/mesh/filter_unit_test.cc @@ -1,7 +1,6 @@ -#include "source/extensions/filters/network/kafka/mesh/filter.h" - #include "test/mocks/network/mocks.h" +#include "contrib/kafka/filters/network/source/mesh/filter.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -176,6 +175,31 @@ TEST_F(FilterUnitTest, ShouldDoNothingOnBufferWatermarkEvents) { testee_.onAboveWriteBufferHighWatermark(); } +class MockUpstreamKafkaConfiguration : public UpstreamKafkaConfiguration { +public: + MOCK_METHOD(void, onData, (Buffer::Instance&)); + MOCK_METHOD(void, reset, ()); + MOCK_METHOD(absl::optional, computeClusterConfigForTopic, + (const std::string& topic), (const)); + MOCK_METHOD((std::pair), getAdvertisedAddress, (), (const)); +}; + +class MockUpstreamKafkaFacade : public UpstreamKafkaFacade { +public: + MOCK_METHOD(KafkaProducer&, getProducerForTopic, (const std::string&)); +}; + +TEST(Filter, ShouldBeConstructable) { + // given + MockUpstreamKafkaConfiguration configuration; + MockUpstreamKafkaFacade upstream_kafka_facade; + + // when + KafkaMeshFilter filter = KafkaMeshFilter(configuration, upstream_kafka_facade); + + // then - no exceptions. 
+} + } // namespace } // namespace Mesh } // namespace Kafka diff --git a/contrib/kafka/filters/network/test/mesh/kafka_mocks.h b/contrib/kafka/filters/network/test/mesh/kafka_mocks.h new file mode 100644 index 0000000000000..ba8996118b0f4 --- /dev/null +++ b/contrib/kafka/filters/network/test/mesh/kafka_mocks.h @@ -0,0 +1,98 @@ +#pragma once + +#include "gmock/gmock.h" +#include "librdkafka/rdkafkacpp.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { + +class MockKafkaProducer : public RdKafka::Producer { +public: + // Producer API. + MOCK_METHOD(RdKafka::ErrorCode, produce, + (RdKafka::Topic*, int32_t, int, void*, size_t, const std::string*, void*), ()); + MOCK_METHOD(RdKafka::ErrorCode, produce, + (RdKafka::Topic*, int32_t, int, void*, size_t, const void*, size_t, void*), ()); + MOCK_METHOD(RdKafka::ErrorCode, produce, + (const std::string, int32_t, int, void*, size_t, const void*, size_t, int64_t, void*), + ()); + MOCK_METHOD(RdKafka::ErrorCode, produce, + (const std::string, int32_t, int, void*, size_t, const void*, size_t, int64_t, + RdKafka::Headers*, void*), + ()); + MOCK_METHOD(RdKafka::ErrorCode, produce, + (RdKafka::Topic*, int32_t, const std::vector*, const std::vector*, void*), + ()); + MOCK_METHOD(RdKafka::ErrorCode, flush, (int), ()); + MOCK_METHOD(RdKafka::ErrorCode, purge, (int), ()); + MOCK_METHOD(RdKafka::Error*, init_transactions, (int), ()); + MOCK_METHOD(RdKafka::Error*, begin_transaction, (), ()); + MOCK_METHOD(RdKafka::Error*, send_offsets_to_transaction, + (const std::vector&, const RdKafka::ConsumerGroupMetadata*, + int), + ()); + MOCK_METHOD(RdKafka::Error*, commit_transaction, (int), ()); + MOCK_METHOD(RdKafka::Error*, abort_transaction, (int), ()); + + // Handle API (unused by us). 
+ MOCK_METHOD(const std::string, name, (), (const)); + MOCK_METHOD(const std::string, memberid, (), (const)); + MOCK_METHOD(int, poll, (int), ()); + MOCK_METHOD(int, outq_len, (), ()); + MOCK_METHOD(RdKafka::ErrorCode, metadata, + (bool, const RdKafka::Topic*, RdKafka::Metadata**, int timout_ms), ()); + MOCK_METHOD(RdKafka::ErrorCode, pause, (std::vector&), ()); + MOCK_METHOD(RdKafka::ErrorCode, resume, (std::vector&), ()); + MOCK_METHOD(RdKafka::ErrorCode, query_watermark_offsets, + (const std::string&, int32_t, int64_t*, int64_t*, int), ()); + MOCK_METHOD(RdKafka::ErrorCode, get_watermark_offsets, + (const std::string&, int32_t, int64_t*, int64_t*), ()); + MOCK_METHOD(RdKafka::ErrorCode, offsetsForTimes, (std::vector&, int), + ()); + MOCK_METHOD(RdKafka::Queue*, get_partition_queue, (const RdKafka::TopicPartition*), ()); + MOCK_METHOD(RdKafka::ErrorCode, set_log_queue, (RdKafka::Queue*), ()); + MOCK_METHOD(void, yield, (), ()); + MOCK_METHOD(const std::string, clusterid, (int), ()); + MOCK_METHOD(struct rd_kafka_s*, c_ptr, (), ()); + MOCK_METHOD(int32_t, controllerid, (int), ()); + MOCK_METHOD(RdKafka::ErrorCode, fatal_error, (std::string&), (const)); + MOCK_METHOD(RdKafka::ErrorCode, oauthbearer_set_token, + (const std::string&, int64_t, const std::string&, const std::list&, + std::string&), + ()); + MOCK_METHOD(RdKafka::ErrorCode, oauthbearer_set_token_failure, (const std::string&), ()); + MOCK_METHOD(void*, mem_malloc, (size_t), ()); + MOCK_METHOD(void, mem_free, (void*), ()); +}; + +class MockKafkaMessage : public RdKafka::Message { +public: + MOCK_METHOD(std::string, errstr, (), (const)); + MOCK_METHOD(RdKafka::ErrorCode, err, (), (const)); + MOCK_METHOD(RdKafka::Topic*, topic, (), (const)); + MOCK_METHOD(std::string, topic_name, (), (const)); + MOCK_METHOD(int32_t, partition, (), (const)); + MOCK_METHOD(void*, payload, (), (const)); + MOCK_METHOD(size_t, len, (), (const)); + MOCK_METHOD(const std::string*, key, (), (const)); + MOCK_METHOD(const void*, 
key_pointer, (), (const)); + MOCK_METHOD(size_t, key_len, (), (const)); + MOCK_METHOD(int64_t, offset, (), (const)); + MOCK_METHOD(RdKafka::MessageTimestamp, timestamp, (), (const)); + MOCK_METHOD(void*, msg_opaque, (), (const)); + MOCK_METHOD(int64_t, latency, (), (const)); + MOCK_METHOD(struct rd_kafka_message_s*, c_ptr, ()); + MOCK_METHOD(RdKafka::Message::Status, status, (), (const)); + MOCK_METHOD(RdKafka::Headers*, headers, ()); + MOCK_METHOD(RdKafka::Headers*, headers, (RdKafka::ErrorCode*)); + MOCK_METHOD(int32_t, broker_id, (), (const)); +}; + +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/kafka/filters/network/test/mesh/request_processor_unit_test.cc b/contrib/kafka/filters/network/test/mesh/request_processor_unit_test.cc new file mode 100644 index 0000000000000..605019141e707 --- /dev/null +++ b/contrib/kafka/filters/network/test/mesh/request_processor_unit_test.cc @@ -0,0 +1,119 @@ +#include "test/test_common/utility.h" + +#include "contrib/kafka/filters/network/source/mesh/abstract_command.h" +#include "contrib/kafka/filters/network/source/mesh/command_handlers/api_versions.h" +#include "contrib/kafka/filters/network/source/mesh/command_handlers/metadata.h" +#include "contrib/kafka/filters/network/source/mesh/command_handlers/produce.h" +#include "contrib/kafka/filters/network/source/mesh/request_processor.h" +#include "contrib/kafka/filters/network/source/mesh/upstream_kafka_facade.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { +namespace { + +class MockAbstractRequestListener : public AbstractRequestListener { +public: + MOCK_METHOD(void, onRequest, (InFlightRequestSharedPtr)); + MOCK_METHOD(void, onRequestReadyForAnswer, ()); +}; + +class MockUpstreamKafkaFacade : public UpstreamKafkaFacade { +public: + 
MOCK_METHOD(KafkaProducer&, getProducerForTopic, (const std::string&)); +}; + +class MockUpstreamKafkaConfiguration : public UpstreamKafkaConfiguration { +public: + MOCK_METHOD(absl::optional, computeClusterConfigForTopic, (const std::string&), + (const)); + MOCK_METHOD((std::pair), getAdvertisedAddress, (), (const)); +}; + +class RequestProcessorTest : public testing::Test { +protected: + MockAbstractRequestListener listener_; + MockUpstreamKafkaConfiguration configuration_; + MockUpstreamKafkaFacade upstream_kafka_facade_; + RequestProcessor testee_ = {listener_, configuration_, upstream_kafka_facade_}; +}; + +TEST_F(RequestProcessorTest, ShouldProcessProduceRequest) { + // given + const RequestHeader header = {PRODUCE_REQUEST_API_KEY, 0, 0, absl::nullopt}; + const ProduceRequest data = {0, 0, {}}; + const auto message = std::make_shared>(header, data); + + InFlightRequestSharedPtr capture = nullptr; + EXPECT_CALL(listener_, onRequest(_)).WillOnce(testing::SaveArg<0>(&capture)); + + // when + testee_.onMessage(message); + + // then + ASSERT_NE(std::dynamic_pointer_cast(capture), nullptr); +} + +TEST_F(RequestProcessorTest, ShouldProcessMetadataRequest) { + // given + const RequestHeader header = {METADATA_REQUEST_API_KEY, 0, 0, absl::nullopt}; + const MetadataRequest data = {absl::nullopt}; + const auto message = std::make_shared>(header, data); + + InFlightRequestSharedPtr capture = nullptr; + EXPECT_CALL(listener_, onRequest(_)).WillOnce(testing::SaveArg<0>(&capture)); + + // when + testee_.onMessage(message); + + // then + ASSERT_NE(std::dynamic_pointer_cast(capture), nullptr); +} + +TEST_F(RequestProcessorTest, ShouldProcessApiVersionsRequest) { + // given + const RequestHeader header = {API_VERSIONS_REQUEST_API_KEY, 0, 0, absl::nullopt}; + const ApiVersionsRequest data = {}; + const auto message = std::make_shared>(header, data); + + InFlightRequestSharedPtr capture = nullptr; + EXPECT_CALL(listener_, onRequest(_)).WillOnce(testing::SaveArg<0>(&capture)); + 
+ // when + testee_.onMessage(message); + + // then + ASSERT_NE(std::dynamic_pointer_cast(capture), nullptr); +} + +TEST_F(RequestProcessorTest, ShouldHandleUnsupportedRequest) { + // given + const RequestHeader header = {LIST_OFFSET_REQUEST_API_KEY, 0, 0, absl::nullopt}; + const ListOffsetRequest data = {0, {}}; + const auto message = std::make_shared>(header, data); + + // when, then - exception gets thrown. + EXPECT_THROW_WITH_REGEX(testee_.onMessage(message), EnvoyException, "unsupported"); +} + +TEST_F(RequestProcessorTest, ShouldHandleUnparseableRequest) { + // given + const RequestHeader header = {42, 42, 42, absl::nullopt}; + const auto arg = std::make_shared(header); + + // when, then - exception gets thrown. + EXPECT_THROW_WITH_REGEX(testee_.onFailedParse(arg), EnvoyException, "unknown"); +} + +} // anonymous namespace +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/kafka/filters/network/test/mesh/upstream_kafka_client_impl_unit_test.cc b/contrib/kafka/filters/network/test/mesh/upstream_kafka_client_impl_unit_test.cc new file mode 100644 index 0000000000000..c40ebd8589bc8 --- /dev/null +++ b/contrib/kafka/filters/network/test/mesh/upstream_kafka_client_impl_unit_test.cc @@ -0,0 +1,211 @@ +#include "test/mocks/event/mocks.h" +#include "test/test_common/thread_factory_for_test.h" + +#include "absl/synchronization/blocking_counter.h" +#include "contrib/kafka/filters/network/source/mesh/upstream_kafka_client_impl.h" +#include "contrib/kafka/filters/network/test/mesh/kafka_mocks.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::AnyNumber; +using testing::AtLeast; +using testing::Return; +using testing::ReturnNull; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { + +class MockLibRdKafkaUtils : public LibRdKafkaUtils { +public: + 
MOCK_METHOD(RdKafka::Conf::ConfResult, setConfProperty, + (RdKafka::Conf&, const std::string&, const std::string&, std::string&), (const)); + MOCK_METHOD(RdKafka::Conf::ConfResult, setConfDeliveryCallback, + (RdKafka::Conf&, RdKafka::DeliveryReportCb*, std::string&), (const)); + MOCK_METHOD((std::unique_ptr), createProducer, + (RdKafka::Conf*, std::string& errstr), (const)); +}; + +class MockProduceFinishCb : public ProduceFinishCb { +public: + MOCK_METHOD(bool, accept, (const DeliveryMemento&)); +}; + +class UpstreamKafkaClientTest : public testing::Test { +protected: + Event::MockDispatcher dispatcher_; + Thread::ThreadFactory& thread_factory_ = Thread::threadFactoryForTest(); + MockLibRdKafkaUtils kafka_utils_; + RawKafkaProducerConfig config_ = {{"key1", "value1"}, {"key2", "value2"}}; + + std::unique_ptr producer_ptr = std::make_unique(); + MockKafkaProducer& producer = *producer_ptr; + + std::shared_ptr origin_ = std::make_shared(); + + // Helper method - allows creation of RichKafkaProducer without problems. + void setupConstructorExpectations() { + EXPECT_CALL(kafka_utils_, setConfProperty(_, "key1", "value1", _)) + .WillOnce(Return(RdKafka::Conf::CONF_OK)); + EXPECT_CALL(kafka_utils_, setConfProperty(_, "key2", "value2", _)) + .WillOnce(Return(RdKafka::Conf::CONF_OK)); + EXPECT_CALL(kafka_utils_, setConfDeliveryCallback(_, _, _)) + .WillOnce(Return(RdKafka::Conf::CONF_OK)); + + EXPECT_CALL(producer, poll(_)).Times(AnyNumber()); + EXPECT_CALL(kafka_utils_, createProducer(_, _)) + .WillOnce(Return(testing::ByMove(std::move(producer_ptr)))); + } +}; + +TEST_F(UpstreamKafkaClientTest, ShouldConstructWithoutProblems) { + // given + setupConstructorExpectations(); + + // when, then - producer got created without problems. 
+ RichKafkaProducer testee = {dispatcher_, thread_factory_, config_, kafka_utils_}; +} + +TEST_F(UpstreamKafkaClientTest, ShouldSendRecordsAndReceiveConfirmations) { + // given + setupConstructorExpectations(); + RichKafkaProducer testee = {dispatcher_, thread_factory_, config_, kafka_utils_}; + + // when, then - should send request without problems. + EXPECT_CALL(producer, produce("t1", 13, 0, _, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(Return(RdKafka::ERR_NO_ERROR)); + const std::vector payloads = {"value1", "value2", "value3"}; + for (const auto& arg : payloads) { + testee.send(origin_, "t1", 13, "KEY", arg); + } + EXPECT_EQ(testee.getUnfinishedRequestsForTest().size(), payloads.size()); + + // when, then - should process confirmations. + EXPECT_CALL(*origin_, accept(_)).Times(3).WillRepeatedly(Return(true)); + for (const auto& arg : payloads) { + const DeliveryMemento memento = {arg.c_str(), RdKafka::ERR_NO_ERROR, 0}; + testee.processDelivery(memento); + } + EXPECT_EQ(testee.getUnfinishedRequestsForTest().size(), 0); +} + +TEST_F(UpstreamKafkaClientTest, ShouldCheckCallbacksForDeliveries) { + // given + setupConstructorExpectations(); + RichKafkaProducer testee = {dispatcher_, thread_factory_, config_, kafka_utils_}; + + // when, then - should send request without problems. + EXPECT_CALL(producer, produce("t1", 13, 0, _, _, _, _, _, _)) + .Times(2) + .WillRepeatedly(Return(RdKafka::ERR_NO_ERROR)); + const std::vector payloads = {"value1", "value2"}; + auto origin1 = std::make_shared(); + auto origin2 = std::make_shared(); + testee.send(origin1, "t1", 13, "KEY", payloads[0]); + testee.send(origin2, "t1", 13, "KEY", payloads[1]); + EXPECT_EQ(testee.getUnfinishedRequestsForTest().size(), payloads.size()); + + // when, then - should process confirmations (notice we pass second memento first). 
+ EXPECT_CALL(*origin1, accept(_)).WillOnce(Return(false)).WillOnce(Return(true)); + EXPECT_CALL(*origin2, accept(_)).WillOnce(Return(true)); + const DeliveryMemento memento1 = {payloads[1].c_str(), RdKafka::ERR_NO_ERROR, 0}; + testee.processDelivery(memento1); + EXPECT_EQ(testee.getUnfinishedRequestsForTest().size(), 1); + const DeliveryMemento memento2 = {payloads[0].c_str(), RdKafka::ERR_NO_ERROR, 0}; + testee.processDelivery(memento2); + EXPECT_EQ(testee.getUnfinishedRequestsForTest().size(), 0); +} + +TEST_F(UpstreamKafkaClientTest, ShouldHandleProduceFailures) { + // given + setupConstructorExpectations(); + RichKafkaProducer testee = {dispatcher_, thread_factory_, config_, kafka_utils_}; + + // when, then - if there are problems while sending, notify the source immediately. + EXPECT_CALL(producer, produce("t1", 42, 0, _, _, _, _, _, _)) + .WillOnce(Return(RdKafka::ERR_LEADER_NOT_AVAILABLE)); + EXPECT_CALL(*origin_, accept(_)).WillOnce(Return(true)); + testee.send(origin_, "t1", 42, "KEY", "VALUE"); + EXPECT_EQ(testee.getUnfinishedRequestsForTest().size(), 0); +} + +TEST_F(UpstreamKafkaClientTest, ShouldHandleKafkaCallback) { + // given + setupConstructorExpectations(); + RichKafkaProducer testee = {dispatcher_, thread_factory_, config_, kafka_utils_}; + testing::NiceMock message; + + // when, then - notification is passed to dispatcher. + EXPECT_CALL(dispatcher_, post(_)); + testee.dr_cb(message); +} + +// This handles situations when users pass bad config to raw producer. +TEST_F(UpstreamKafkaClientTest, ShouldThrowIfSettingPropertiesFails) { + // given + EXPECT_CALL(kafka_utils_, setConfProperty(_, _, _, _)) + .WillOnce(Return(RdKafka::Conf::CONF_INVALID)); + + // when, then - exception gets thrown during construction. 
+ EXPECT_THROW(RichKafkaProducer(dispatcher_, thread_factory_, config_, kafka_utils_), + EnvoyException); +} + +TEST_F(UpstreamKafkaClientTest, ShouldThrowIfSettingDeliveryCallbackFails) { + // given + EXPECT_CALL(kafka_utils_, setConfProperty(_, _, _, _)) + .WillRepeatedly(Return(RdKafka::Conf::CONF_OK)); + EXPECT_CALL(kafka_utils_, setConfDeliveryCallback(_, _, _)) + .WillOnce(Return(RdKafka::Conf::CONF_INVALID)); + + // when, then - exception gets thrown during construction. + EXPECT_THROW(RichKafkaProducer(dispatcher_, thread_factory_, config_, kafka_utils_), + EnvoyException); +} + +TEST_F(UpstreamKafkaClientTest, ShouldThrowIfRawProducerConstructionFails) { + // given + EXPECT_CALL(kafka_utils_, setConfProperty(_, _, _, _)) + .WillRepeatedly(Return(RdKafka::Conf::CONF_OK)); + EXPECT_CALL(kafka_utils_, setConfDeliveryCallback(_, _, _)) + .WillOnce(Return(RdKafka::Conf::CONF_OK)); + EXPECT_CALL(kafka_utils_, createProducer(_, _)).WillOnce(ReturnNull()); + + // when, then - exception gets thrown during construction. + EXPECT_THROW(RichKafkaProducer(dispatcher_, thread_factory_, config_, kafka_utils_), + EnvoyException); +} + +// Rich producer's constructor starts a monitoring thread. +// We are going to wait for at least one invocation of producer 'poll', so we are confident that it +// does monitoring. Then we are going to destroy the testee, and expect the thread to finish. +TEST_F(UpstreamKafkaClientTest, ShouldPollProducerForEventsUntilShutdown) { + // given + setupConstructorExpectations(); + + absl::BlockingCounter counter{1}; + EXPECT_CALL(producer, poll(_)).Times(AtLeast(1)).WillOnce([&counter]() { + counter.DecrementCount(); + return 0; + }); + + // when + { + std::unique_ptr testee = + std::make_unique(dispatcher_, thread_factory_, config_, kafka_utils_); + counter.Wait(); + } + + // then - the above block actually finished, what means that the monitoring thread interacted with + // underlying Kafka producer. 
+} + +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/kafka/filters/network/test/mesh/upstream_kafka_facade_unit_test.cc b/contrib/kafka/filters/network/test/mesh/upstream_kafka_facade_unit_test.cc new file mode 100644 index 0000000000000..d4a0497c21476 --- /dev/null +++ b/contrib/kafka/filters/network/test/mesh/upstream_kafka_facade_unit_test.cc @@ -0,0 +1,107 @@ +#include "envoy/thread/thread.h" +#include "envoy/thread_local/thread_local.h" + +#include "test/mocks/thread_local/mocks.h" +#include "test/test_common/thread_factory_for_test.h" + +#include "contrib/kafka/filters/network/source/mesh/upstream_kafka_facade.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::Return; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { +namespace { + +class MockUpstreamKafkaConfiguration : public UpstreamKafkaConfiguration { +public: + MOCK_METHOD(absl::optional, computeClusterConfigForTopic, (const std::string&), + (const)); + MOCK_METHOD((std::pair), getAdvertisedAddress, (), (const)); +}; + +class MockThreadFactory : public Thread::ThreadFactory { +public: + MOCK_METHOD(Thread::ThreadPtr, createThread, (std::function, Thread::OptionsOptConstRef)); + MOCK_METHOD(Thread::ThreadId, currentThreadId, ()); +}; + +TEST(UpstreamKafkaFacadeTest, shouldCreateProducerOnlyOnceForTheSameCluster) { + // given + const std::string topic1 = "topic1"; + const std::string topic2 = "topic2"; + + MockUpstreamKafkaConfiguration configuration; + const ClusterConfig cluster_config = {"cluster", 1, {{"bootstrap.servers", "localhost:9092"}}}; + EXPECT_CALL(configuration, computeClusterConfigForTopic(topic1)).WillOnce(Return(cluster_config)); + EXPECT_CALL(configuration, computeClusterConfigForTopic(topic2)).WillOnce(Return(cluster_config)); + ThreadLocal::MockInstance slot_allocator; + EXPECT_CALL(slot_allocator, 
allocateSlot()) + .WillOnce(Invoke(&slot_allocator, &ThreadLocal::MockInstance::allocateSlotMock)); + Thread::ThreadFactory& thread_factory = Thread::threadFactoryForTest(); + UpstreamKafkaFacadeImpl testee = {configuration, slot_allocator, thread_factory}; + + // when + auto& result1 = testee.getProducerForTopic(topic1); + auto& result2 = testee.getProducerForTopic(topic2); + + // then + EXPECT_EQ(&result1, &result2); + EXPECT_EQ(testee.getProducerCountForTest(), 1); +} + +TEST(UpstreamKafkaFacadeTest, shouldCreateDifferentProducersForDifferentClusters) { + // given + const std::string topic1 = "topic1"; + const std::string topic2 = "topic2"; + + MockUpstreamKafkaConfiguration configuration; + // Notice it's the cluster name that matters, not the producer config. + const ClusterConfig cluster_config1 = {"cluster1", 1, {{"bootstrap.servers", "localhost:9092"}}}; + EXPECT_CALL(configuration, computeClusterConfigForTopic(topic1)) + .WillOnce(Return(cluster_config1)); + const ClusterConfig cluster_config2 = {"cluster2", 1, {{"bootstrap.servers", "localhost:9092"}}}; + EXPECT_CALL(configuration, computeClusterConfigForTopic(topic2)) + .WillOnce(Return(cluster_config2)); + ThreadLocal::MockInstance slot_allocator; + EXPECT_CALL(slot_allocator, allocateSlot()) + .WillOnce(Invoke(&slot_allocator, &ThreadLocal::MockInstance::allocateSlotMock)); + Thread::ThreadFactory& thread_factory = Thread::threadFactoryForTest(); + UpstreamKafkaFacadeImpl testee = {configuration, slot_allocator, thread_factory}; + + // when + auto& result1 = testee.getProducerForTopic(topic1); + auto& result2 = testee.getProducerForTopic(topic2); + + // then + EXPECT_NE(&result1, &result2); + EXPECT_EQ(testee.getProducerCountForTest(), 2); +} + +TEST(UpstreamKafkaFacadeTest, shouldThrowIfThereIsNoConfigurationForGivenTopic) { + // given + const std::string topic = "topic1"; + + MockUpstreamKafkaConfiguration configuration; + const ClusterConfig cluster_config = {"cluster", 1, {{"bootstrap.servers", 
"localhost:9092"}}}; + EXPECT_CALL(configuration, computeClusterConfigForTopic(topic)).WillOnce(Return(absl::nullopt)); + ThreadLocal::MockInstance slot_allocator; + EXPECT_CALL(slot_allocator, allocateSlot()) + .WillOnce(Invoke(&slot_allocator, &ThreadLocal::MockInstance::allocateSlotMock)); + Thread::ThreadFactory& thread_factory = Thread::threadFactoryForTest(); + UpstreamKafkaFacadeImpl testee = {configuration, slot_allocator, thread_factory}; + + // when, then - exception gets thrown. + EXPECT_THROW(testee.getProducerForTopic(topic), EnvoyException); +} + +} // namespace +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/kafka/message_utilities.h b/contrib/kafka/filters/network/test/message_utilities.h similarity index 94% rename from test/extensions/filters/network/kafka/message_utilities.h rename to contrib/kafka/filters/network/test/message_utilities.h index 6fc0e1728b21e..00278094e2cc1 100644 --- a/test/extensions/filters/network/kafka/message_utilities.h +++ b/contrib/kafka/filters/network/test/message_utilities.h @@ -2,8 +2,8 @@ #include -#include "source/extensions/filters/network/kafka/kafka_request.h" -#include "source/extensions/filters/network/kafka/kafka_response.h" +#include "contrib/kafka/filters/network/source/kafka_request.h" +#include "contrib/kafka/filters/network/source/kafka_response.h" namespace Envoy { namespace Extensions { diff --git a/test/extensions/filters/network/kafka/metrics_integration_test.cc b/contrib/kafka/filters/network/test/metrics_integration_test.cc similarity index 89% rename from test/extensions/filters/network/kafka/metrics_integration_test.cc rename to contrib/kafka/filters/network/test/metrics_integration_test.cc index 84a331a76003f..47873fbfaeef6 100644 --- a/test/extensions/filters/network/kafka/metrics_integration_test.cc +++ b/contrib/kafka/filters/network/test/metrics_integration_test.cc @@ -1,9 
+1,8 @@ -#include "source/extensions/filters/network/kafka/external/request_metrics.h" -#include "source/extensions/filters/network/kafka/external/response_metrics.h" - #include "test/common/stats/stat_test_utility.h" -#include "test/extensions/filters/network/kafka/message_utilities.h" +#include "contrib/kafka/filters/network/source/external/request_metrics.h" +#include "contrib/kafka/filters/network/source/external/response_metrics.h" +#include "contrib/kafka/filters/network/test/message_utilities.h" #include "gtest/gtest.h" namespace Envoy { diff --git a/test/extensions/filters/network/kafka/protocol/launcher.py b/contrib/kafka/filters/network/test/protocol/launcher.py similarity index 95% rename from test/extensions/filters/network/kafka/protocol/launcher.py rename to contrib/kafka/filters/network/test/protocol/launcher.py index 6894cecb4fea7..8a428f42d374e 100644 --- a/test/extensions/filters/network/kafka/protocol/launcher.py +++ b/contrib/kafka/filters/network/test/protocol/launcher.py @@ -2,7 +2,7 @@ # Launcher for generating Kafka protocol tests. -import source.extensions.filters.network.kafka.protocol.generator as generator +import contrib.kafka.filters.network.source.protocol.generator as generator import sys import os diff --git a/test/extensions/filters/network/kafka/protocol/request_codec_request_test_cc.j2 b/contrib/kafka/filters/network/test/protocol/request_codec_request_test_cc.j2 similarity index 90% rename from test/extensions/filters/network/kafka/protocol/request_codec_request_test_cc.j2 rename to contrib/kafka/filters/network/test/protocol/request_codec_request_test_cc.j2 index 99b32400be79a..5fe3e99d4f64e 100644 --- a/test/extensions/filters/network/kafka/protocol/request_codec_request_test_cc.j2 +++ b/contrib/kafka/filters/network/test/protocol/request_codec_request_test_cc.j2 @@ -9,11 +9,11 @@ - capture messages received in callback, - verify that captured messages are identical to the ones sent. 
#} -#include "source/extensions/filters/network/kafka/external/requests.h" -#include "source/extensions/filters/network/kafka/request_codec.h" +#include "contrib/kafka/filters/network/source/external/requests.h" +#include "contrib/kafka/filters/network/source/request_codec.h" -#include "test/extensions/filters/network/kafka/buffer_based_test.h" -#include "test/extensions/filters/network/kafka/serialization_utilities.h" +#include "contrib/kafka/filters/network/test/buffer_based_test.h" +#include "contrib/kafka/filters/network/test/serialization_utilities.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/kafka/protocol/request_utilities_cc.j2 b/contrib/kafka/filters/network/test/protocol/request_utilities_cc.j2 similarity index 94% rename from test/extensions/filters/network/kafka/protocol/request_utilities_cc.j2 rename to contrib/kafka/filters/network/test/protocol/request_utilities_cc.j2 index 90a3ab46c8947..a90796c0acc11 100644 --- a/test/extensions/filters/network/kafka/protocol/request_utilities_cc.j2 +++ b/contrib/kafka/filters/network/test/protocol/request_utilities_cc.j2 @@ -3,9 +3,9 @@ This file contains implementation of request-related methods contained in 'message_utilities.h'. 
#} -#include "test/extensions/filters/network/kafka/message_utilities.h" +#include "contrib/kafka/filters/network/test/message_utilities.h" -#include "source/extensions/filters/network/kafka/external/requests.h" +#include "contrib/kafka/filters/network/source/external/requests.h" namespace Envoy { namespace Extensions { diff --git a/test/extensions/filters/network/kafka/protocol/requests_test_cc.j2 b/contrib/kafka/filters/network/test/protocol/requests_test_cc.j2 similarity index 91% rename from test/extensions/filters/network/kafka/protocol/requests_test_cc.j2 rename to contrib/kafka/filters/network/test/protocol/requests_test_cc.j2 index 4157eec181d24..9ce37e7cc6029 100644 --- a/test/extensions/filters/network/kafka/protocol/requests_test_cc.j2 +++ b/contrib/kafka/filters/network/test/protocol/requests_test_cc.j2 @@ -3,10 +3,10 @@ For every request, we want to check if it can be serialized and deserialized properly. #} -#include "source/extensions/filters/network/kafka/external/requests.h" -#include "source/extensions/filters/network/kafka/request_codec.h" +#include "contrib/kafka/filters/network/source/external/requests.h" +#include "contrib/kafka/filters/network/source/request_codec.h" -#include "test/extensions/filters/network/kafka/buffer_based_test.h" +#include "contrib/kafka/filters/network/test/buffer_based_test.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/kafka/protocol/response_codec_response_test_cc.j2 b/contrib/kafka/filters/network/test/protocol/response_codec_response_test_cc.j2 similarity index 90% rename from test/extensions/filters/network/kafka/protocol/response_codec_response_test_cc.j2 rename to contrib/kafka/filters/network/test/protocol/response_codec_response_test_cc.j2 index d03e40e2b8537..bae7252e5c34a 100644 --- a/test/extensions/filters/network/kafka/protocol/response_codec_response_test_cc.j2 +++ b/contrib/kafka/filters/network/test/protocol/response_codec_response_test_cc.j2 @@ -9,11 
+9,11 @@ - capture messages received in callback, - verify that captured messages are identical to the ones sent. #} -#include "source/extensions/filters/network/kafka/external/responses.h" -#include "source/extensions/filters/network/kafka/response_codec.h" +#include "contrib/kafka/filters/network/source/external/responses.h" +#include "contrib/kafka/filters/network/source/response_codec.h" -#include "test/extensions/filters/network/kafka/buffer_based_test.h" -#include "test/extensions/filters/network/kafka/serialization_utilities.h" +#include "contrib/kafka/filters/network/test/buffer_based_test.h" +#include "contrib/kafka/filters/network/test/serialization_utilities.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/kafka/protocol/response_utilities_cc.j2 b/contrib/kafka/filters/network/test/protocol/response_utilities_cc.j2 similarity index 94% rename from test/extensions/filters/network/kafka/protocol/response_utilities_cc.j2 rename to contrib/kafka/filters/network/test/protocol/response_utilities_cc.j2 index af412270c1858..cf41d02e3ca78 100644 --- a/test/extensions/filters/network/kafka/protocol/response_utilities_cc.j2 +++ b/contrib/kafka/filters/network/test/protocol/response_utilities_cc.j2 @@ -3,9 +3,9 @@ This file contains implementation of response-related methods contained in 'message_utilities.h'. 
#} -#include "test/extensions/filters/network/kafka/message_utilities.h" +#include "contrib/kafka/filters/network/test/message_utilities.h" -#include "source/extensions/filters/network/kafka/external/responses.h" +#include "contrib/kafka/filters/network/source/external/responses.h" namespace Envoy { namespace Extensions { diff --git a/test/extensions/filters/network/kafka/protocol/responses_test_cc.j2 b/contrib/kafka/filters/network/test/protocol/responses_test_cc.j2 similarity index 91% rename from test/extensions/filters/network/kafka/protocol/responses_test_cc.j2 rename to contrib/kafka/filters/network/test/protocol/responses_test_cc.j2 index cf5d552721acf..c8673bcdbcd05 100644 --- a/test/extensions/filters/network/kafka/protocol/responses_test_cc.j2 +++ b/contrib/kafka/filters/network/test/protocol/responses_test_cc.j2 @@ -3,10 +3,10 @@ For every response, we want to check if it can be serialized and deserialized properly. #} -#include "source/extensions/filters/network/kafka/external/responses.h" -#include "source/extensions/filters/network/kafka/response_codec.h" +#include "contrib/kafka/filters/network/source/external/responses.h" +#include "contrib/kafka/filters/network/source/response_codec.h" -#include "test/extensions/filters/network/kafka/buffer_based_test.h" +#include "contrib/kafka/filters/network/test/buffer_based_test.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/kafka/request_codec_integration_test.cc b/contrib/kafka/filters/network/test/request_codec_integration_test.cc similarity index 91% rename from test/extensions/filters/network/kafka/request_codec_integration_test.cc rename to contrib/kafka/filters/network/test/request_codec_integration_test.cc index ceb475b450630..a907402b7643a 100644 --- a/test/extensions/filters/network/kafka/request_codec_integration_test.cc +++ b/contrib/kafka/filters/network/test/request_codec_integration_test.cc @@ -1,8 +1,6 @@ -#include 
"source/extensions/filters/network/kafka/request_codec.h" - -#include "test/extensions/filters/network/kafka/buffer_based_test.h" -#include "test/extensions/filters/network/kafka/serialization_utilities.h" - +#include "contrib/kafka/filters/network/source/request_codec.h" +#include "contrib/kafka/filters/network/test/buffer_based_test.h" +#include "contrib/kafka/filters/network/test/serialization_utilities.h" #include "gtest/gtest.h" namespace Envoy { diff --git a/test/extensions/filters/network/kafka/request_codec_unit_test.cc b/contrib/kafka/filters/network/test/request_codec_unit_test.cc similarity index 97% rename from test/extensions/filters/network/kafka/request_codec_unit_test.cc rename to contrib/kafka/filters/network/test/request_codec_unit_test.cc index 0e6ad23ce5f06..95e8e9de7d3df 100644 --- a/test/extensions/filters/network/kafka/request_codec_unit_test.cc +++ b/contrib/kafka/filters/network/test/request_codec_unit_test.cc @@ -1,7 +1,5 @@ -#include "source/extensions/filters/network/kafka/request_codec.h" - -#include "test/extensions/filters/network/kafka/buffer_based_test.h" - +#include "contrib/kafka/filters/network/source/request_codec.h" +#include "contrib/kafka/filters/network/test/buffer_based_test.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/kafka/response_codec_integration_test.cc b/contrib/kafka/filters/network/test/response_codec_integration_test.cc similarity index 92% rename from test/extensions/filters/network/kafka/response_codec_integration_test.cc rename to contrib/kafka/filters/network/test/response_codec_integration_test.cc index 191d738297f2b..8fd41b5dd3528 100644 --- a/test/extensions/filters/network/kafka/response_codec_integration_test.cc +++ b/contrib/kafka/filters/network/test/response_codec_integration_test.cc @@ -1,8 +1,6 @@ -#include "source/extensions/filters/network/kafka/response_codec.h" - -#include "test/extensions/filters/network/kafka/buffer_based_test.h" -#include 
"test/extensions/filters/network/kafka/serialization_utilities.h" - +#include "contrib/kafka/filters/network/source/response_codec.h" +#include "contrib/kafka/filters/network/test/buffer_based_test.h" +#include "contrib/kafka/filters/network/test/serialization_utilities.h" #include "gtest/gtest.h" namespace Envoy { diff --git a/test/extensions/filters/network/kafka/response_codec_unit_test.cc b/contrib/kafka/filters/network/test/response_codec_unit_test.cc similarity index 97% rename from test/extensions/filters/network/kafka/response_codec_unit_test.cc rename to contrib/kafka/filters/network/test/response_codec_unit_test.cc index d05f42e9a9a25..33797381d0a62 100644 --- a/test/extensions/filters/network/kafka/response_codec_unit_test.cc +++ b/contrib/kafka/filters/network/test/response_codec_unit_test.cc @@ -1,7 +1,5 @@ -#include "source/extensions/filters/network/kafka/response_codec.h" - -#include "test/extensions/filters/network/kafka/buffer_based_test.h" - +#include "contrib/kafka/filters/network/source/response_codec.h" +#include "contrib/kafka/filters/network/test/buffer_based_test.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/kafka/serialization/launcher.py b/contrib/kafka/filters/network/test/serialization/launcher.py similarity index 90% rename from test/extensions/filters/network/kafka/serialization/launcher.py rename to contrib/kafka/filters/network/test/serialization/launcher.py index 5efd339d687e3..734705b797bd0 100644 --- a/test/extensions/filters/network/kafka/serialization/launcher.py +++ b/contrib/kafka/filters/network/test/serialization/launcher.py @@ -2,7 +2,7 @@ # Launcher for generating composite serializer tests. 
-import source.extensions.filters.network.kafka.serialization.generator as generator +import contrib.kafka.filters.network.source.serialization.generator as generator import sys import os diff --git a/test/extensions/filters/network/kafka/serialization/serialization_composite_test_cc.j2 b/contrib/kafka/filters/network/test/serialization/serialization_composite_test_cc.j2 similarity index 94% rename from test/extensions/filters/network/kafka/serialization/serialization_composite_test_cc.j2 rename to contrib/kafka/filters/network/test/serialization/serialization_composite_test_cc.j2 index 884f5dcfd81c7..f943984a3a389 100644 --- a/test/extensions/filters/network/kafka/serialization/serialization_composite_test_cc.j2 +++ b/contrib/kafka/filters/network/test/serialization/serialization_composite_test_cc.j2 @@ -5,9 +5,9 @@ Covers the corner case of 0 delegates, and then uses templating to create tests for 1..N cases. #} -#include "source/extensions/filters/network/kafka/external/serialization_composite.h" +#include "contrib/kafka/filters/network/source/external/serialization_composite.h" -#include "test/extensions/filters/network/kafka/serialization_utilities.h" +#include "contrib/kafka/filters/network/test/serialization_utilities.h" namespace Envoy { namespace Extensions { diff --git a/test/extensions/filters/network/kafka/serialization_test.cc b/contrib/kafka/filters/network/test/serialization_test.cc similarity index 99% rename from test/extensions/filters/network/kafka/serialization_test.cc rename to contrib/kafka/filters/network/test/serialization_test.cc index 5a6a1ae7e89b9..c177e86364dd7 100644 --- a/test/extensions/filters/network/kafka/serialization_test.cc +++ b/contrib/kafka/filters/network/test/serialization_test.cc @@ -1,8 +1,8 @@ -#include "source/extensions/filters/network/kafka/tagged_fields.h" - -#include "test/extensions/filters/network/kafka/serialization_utilities.h" #include "test/test_common/utility.h" +#include 
"contrib/kafka/filters/network/source/tagged_fields.h" +#include "contrib/kafka/filters/network/test/serialization_utilities.h" + namespace Envoy { namespace Extensions { namespace NetworkFilters { diff --git a/test/extensions/filters/network/kafka/serialization_utilities.cc b/contrib/kafka/filters/network/test/serialization_utilities.cc similarity index 90% rename from test/extensions/filters/network/kafka/serialization_utilities.cc rename to contrib/kafka/filters/network/test/serialization_utilities.cc index 10d32b53ca7c5..18ff3b3782d10 100644 --- a/test/extensions/filters/network/kafka/serialization_utilities.cc +++ b/contrib/kafka/filters/network/test/serialization_utilities.cc @@ -1,4 +1,4 @@ -#include "test/extensions/filters/network/kafka/serialization_utilities.h" +#include "contrib/kafka/filters/network/test/serialization_utilities.h" namespace Envoy { namespace Extensions { diff --git a/test/extensions/filters/network/kafka/serialization_utilities.h b/contrib/kafka/filters/network/test/serialization_utilities.h similarity index 99% rename from test/extensions/filters/network/kafka/serialization_utilities.h rename to contrib/kafka/filters/network/test/serialization_utilities.h index 4b063971f0c5c..7752776ba0ecb 100644 --- a/test/extensions/filters/network/kafka/serialization_utilities.h +++ b/contrib/kafka/filters/network/test/serialization_utilities.h @@ -1,10 +1,10 @@ #pragma once #include "source/common/buffer/buffer_impl.h" -#include "source/extensions/filters/network/kafka/serialization.h" #include "absl/container/fixed_array.h" #include "absl/strings/string_view.h" +#include "contrib/kafka/filters/network/source/serialization.h" #include "gtest/gtest.h" namespace Envoy { diff --git a/source/extensions/filters/network/mysql_proxy/BUILD b/contrib/mysql_proxy/filters/network/source/BUILD similarity index 87% rename from source/extensions/filters/network/mysql_proxy/BUILD rename to contrib/mysql_proxy/filters/network/source/BUILD index 
8c2f9316138ac..5b219fae1d395 100644 --- a/source/extensions/filters/network/mysql_proxy/BUILD +++ b/contrib/mysql_proxy/filters/network/source/BUILD @@ -1,17 +1,17 @@ load( "//bazel:envoy_build_system.bzl", - "envoy_cc_extension", + "envoy_cc_contrib_extension", "envoy_cc_library", - "envoy_extension_package", + "envoy_contrib_package", ) licenses(["notice"]) # Apache 2 +envoy_contrib_package() + # MySQL proxy L7 network filter. # Public docs: docs/root/configuration/network_filters/mysql_proxy_filter.rst -envoy_extension_package() - envoy_cc_library( name = "filter_lib", srcs = [ @@ -53,8 +53,8 @@ envoy_cc_library( deps = [ ":codec_interface", ":util_lib", + "//contrib/common/sqlutils/source:sqlutils_lib", "//source/common/buffer:buffer_lib", - "//source/extensions/common/sqlutils:sqlutils_lib", ], ) @@ -90,7 +90,7 @@ envoy_cc_library( deps = [ "//source/extensions/filters/network:well_known_names", "//source/extensions/filters/network/common:factory_base_lib", - "@envoy_api//envoy/extensions/filters/network/mysql_proxy/v3:pkg_cc_proto", + "@envoy_api//contrib/envoy/extensions/filters/network/mysql_proxy/v3:pkg_cc_proto", ], ) @@ -103,7 +103,7 @@ envoy_cc_library( ], ) -envoy_cc_extension( +envoy_cc_contrib_extension( name = "config", srcs = ["mysql_config.cc"], hdrs = ["mysql_config.h"], @@ -111,6 +111,6 @@ envoy_cc_extension( ":filter_lib", "//source/extensions/filters/network:well_known_names", "//source/extensions/filters/network/common:factory_base_lib", - "@envoy_api//envoy/extensions/filters/network/mysql_proxy/v3:pkg_cc_proto", + "@envoy_api//contrib/envoy/extensions/filters/network/mysql_proxy/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/network/mysql_proxy/mysql_codec.h b/contrib/mysql_proxy/filters/network/source/mysql_codec.h similarity index 100% rename from source/extensions/filters/network/mysql_proxy/mysql_codec.h rename to contrib/mysql_proxy/filters/network/source/mysql_codec.h diff --git 
a/source/extensions/filters/network/mysql_proxy/mysql_codec_clogin.cc b/contrib/mysql_proxy/filters/network/source/mysql_codec_clogin.cc similarity index 76% rename from source/extensions/filters/network/mysql_proxy/mysql_codec_clogin.cc rename to contrib/mysql_proxy/filters/network/source/mysql_codec_clogin.cc index be4739d909f93..a2b6e3664cb07 100644 --- a/source/extensions/filters/network/mysql_proxy/mysql_codec_clogin.cc +++ b/contrib/mysql_proxy/filters/network/source/mysql_codec_clogin.cc @@ -1,7 +1,10 @@ -#include "source/extensions/filters/network/mysql_proxy/mysql_codec_clogin.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec_clogin.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_utils.h" +#include "source/common/buffer/buffer_impl.h" +#include "source/common/common/logger.h" + +#include "contrib/mysql_proxy/filters/network/source/mysql_codec.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_utils.h" namespace Envoy { namespace Extensions { @@ -55,6 +58,10 @@ bool ClientLogin::isClientSecureConnection() const { return client_cap_ & CLIENT_SECURE_CONNECTION; } +void ClientLogin::addConnectionAttribute(const std::pair& attr) { + conn_attr_.emplace_back(attr); +} + DecodeStatus ClientLogin::parseMessage(Buffer::Instance& buffer, uint32_t len) { /* 4.0 uses 2 bytes, 4.1+ uses 4 bytes, but the proto-flag is in the lower 2 * bytes */ @@ -96,6 +103,7 @@ DecodeStatus ClientLogin::parseResponseSsl(Buffer::Instance& buffer) { } DecodeStatus ClientLogin::parseResponse41(Buffer::Instance& buffer) { + int total = buffer.length(); uint16_t ext_cap; if (BufferHelper::readUint16(buffer, ext_cap) != DecodeStatus::Success) { ENVOY_LOG(debug, "error when parsing client cap flag of client login message"); @@ -155,6 +163,48 @@ DecodeStatus ClientLogin::parseResponse41(Buffer::Instance& buffer) { ENVOY_LOG(debug, "error when parsing auth plugin name of 
client login message"); return DecodeStatus::Failure; } + if (client_cap_ & CLIENT_CONNECT_ATTRS) { + // length of all key value pairs + uint64_t kvs_len; + if (BufferHelper::readLengthEncodedInteger(buffer, kvs_len) != DecodeStatus::Success) { + ENVOY_LOG(debug, "error when parsing length of all key-values in connection attributes of " + "client login message"); + return DecodeStatus::Failure; + } + while (kvs_len > 0) { + uint64_t str_len; + uint64_t prev_len = buffer.length(); + if (BufferHelper::readLengthEncodedInteger(buffer, str_len) != DecodeStatus::Success) { + ENVOY_LOG(debug, "error when parsing total length of connection attribute key in " + "connection attributes of " + "client login message"); + return DecodeStatus::Failure; + } + std::string key; + if (BufferHelper::readStringBySize(buffer, str_len, key) != DecodeStatus::Success) { + ENVOY_LOG(debug, "error when parsing connection attribute key in connection attributes of " + "client login message"); + return DecodeStatus::Failure; + } + if (BufferHelper::readLengthEncodedInteger(buffer, str_len) != DecodeStatus::Success) { + ENVOY_LOG( + debug, + "error when parsing length of connection attribute value in connection attributes of " + "client login message"); + return DecodeStatus::Failure; + } + std::string val; + if (BufferHelper::readStringBySize(buffer, str_len, val) != DecodeStatus::Success) { + ENVOY_LOG(debug, "error when parsing connection attribute val in connection attributes of " + "client login message"); + return DecodeStatus::Failure; + } + conn_attr_.emplace_back(std::make_pair(std::move(key), std::move(val))); + kvs_len -= prev_len - buffer.length(); + } + } + ENVOY_LOG(debug, "parsed client login protocol 41, consumed len {}, remain len {}", + total - buffer.length(), buffer.length()); return DecodeStatus::Success; } @@ -238,6 +288,17 @@ void ClientLogin::encodeResponse41(Buffer::Instance& out) const { BufferHelper::addString(out, auth_plugin_name_); BufferHelper::addUint8(out, 
enc_end_string); } + if (client_cap_ & CLIENT_CONNECT_ATTRS) { + Buffer::OwnedImpl conn_attr; + for (const auto& kv : conn_attr_) { + BufferHelper::addLengthEncodedInteger(conn_attr, kv.first.length()); + BufferHelper::addString(conn_attr, kv.first); + BufferHelper::addLengthEncodedInteger(conn_attr, kv.second.length()); + BufferHelper::addString(conn_attr, kv.second); + } + BufferHelper::addLengthEncodedInteger(out, conn_attr.length()); + out.move(conn_attr); + } } void ClientLogin::encodeResponse320(Buffer::Instance& out) const { diff --git a/source/extensions/filters/network/mysql_proxy/mysql_codec_clogin.h b/contrib/mysql_proxy/filters/network/source/mysql_codec_clogin.h similarity index 87% rename from source/extensions/filters/network/mysql_proxy/mysql_codec_clogin.h rename to contrib/mysql_proxy/filters/network/source/mysql_codec_clogin.h index fbf3f7b22a723..a264d240ab674 100644 --- a/source/extensions/filters/network/mysql_proxy/mysql_codec_clogin.h +++ b/contrib/mysql_proxy/filters/network/source/mysql_codec_clogin.h @@ -2,7 +2,8 @@ #include "envoy/buffer/buffer.h" #include "source/common/buffer/buffer_impl.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec.h" + +#include "contrib/mysql_proxy/filters/network/source/mysql_codec.h" namespace Envoy { namespace Extensions { @@ -25,6 +26,9 @@ class ClientLogin : public MySQLCodec { const std::vector& getAuthResp() const { return auth_resp_; } const std::string& getDb() const { return db_; } const std::string& getAuthPluginName() const { return auth_plugin_name_; } + const std::vector>& getConnectionAttribute() const { + return conn_attr_; + } bool isResponse41() const; bool isResponse320() const; bool isSSLRequest() const; @@ -40,6 +44,7 @@ class ClientLogin : public MySQLCodec { void setAuthResp(const std::vector& auth_resp); void setDb(const std::string& db); void setAuthPluginName(const std::string& auth_plugin_name); + void addConnectionAttribute(const std::pair&); private: 
DecodeStatus parseResponseSsl(Buffer::Instance& buffer); @@ -56,6 +61,7 @@ class ClientLogin : public MySQLCodec { std::vector auth_resp_; std::string db_; std::string auth_plugin_name_; + std::vector> conn_attr_; }; } // namespace MySQLProxy diff --git a/source/extensions/filters/network/mysql_proxy/mysql_codec_clogin_resp.cc b/contrib/mysql_proxy/filters/network/source/mysql_codec_clogin_resp.cc similarity index 96% rename from source/extensions/filters/network/mysql_proxy/mysql_codec_clogin_resp.cc rename to contrib/mysql_proxy/filters/network/source/mysql_codec_clogin_resp.cc index 8847de47baf0f..0df27bab2f6ab 100644 --- a/source/extensions/filters/network/mysql_proxy/mysql_codec_clogin_resp.cc +++ b/contrib/mysql_proxy/filters/network/source/mysql_codec_clogin_resp.cc @@ -1,11 +1,12 @@ -#include "source/extensions/filters/network/mysql_proxy/mysql_codec_clogin_resp.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec_clogin_resp.h" #include "envoy/buffer/buffer.h" #include "source/common/common/assert.h" #include "source/common/common/logger.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_utils.h" + +#include "contrib/mysql_proxy/filters/network/source/mysql_codec.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_utils.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/mysql_proxy/mysql_codec_clogin_resp.h b/contrib/mysql_proxy/filters/network/source/mysql_codec_clogin_resp.h similarity index 96% rename from source/extensions/filters/network/mysql_proxy/mysql_codec_clogin_resp.h rename to contrib/mysql_proxy/filters/network/source/mysql_codec_clogin_resp.h index a843e0c85b623..ca48b19401e1f 100644 --- a/source/extensions/filters/network/mysql_proxy/mysql_codec_clogin_resp.h +++ b/contrib/mysql_proxy/filters/network/source/mysql_codec_clogin_resp.h @@ -5,8 +5,9 @@ #include "envoy/buffer/buffer.h" #include 
"source/common/buffer/buffer_impl.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec_clogin.h" + +#include "contrib/mysql_proxy/filters/network/source/mysql_codec.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec_clogin.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/mysql_proxy/mysql_codec_command.cc b/contrib/mysql_proxy/filters/network/source/mysql_codec_command.cc similarity index 90% rename from source/extensions/filters/network/mysql_proxy/mysql_codec_command.cc rename to contrib/mysql_proxy/filters/network/source/mysql_codec_command.cc index 4e515381760ae..6d6aa88adcdb9 100644 --- a/source/extensions/filters/network/mysql_proxy/mysql_codec_command.cc +++ b/contrib/mysql_proxy/filters/network/source/mysql_codec_command.cc @@ -1,11 +1,12 @@ -#include "source/extensions/filters/network/mysql_proxy/mysql_codec_command.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec_command.h" #include "envoy/buffer/buffer.h" #include "source/common/common/logger.h" #include "source/common/common/macros.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_utils.h" + +#include "contrib/mysql_proxy/filters/network/source/mysql_codec.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_utils.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/mysql_proxy/mysql_codec_command.h b/contrib/mysql_proxy/filters/network/source/mysql_codec_command.h similarity index 96% rename from source/extensions/filters/network/mysql_proxy/mysql_codec_command.h rename to contrib/mysql_proxy/filters/network/source/mysql_codec_command.h index 4764770e07040..80e42a3fd5e06 100644 --- a/source/extensions/filters/network/mysql_proxy/mysql_codec_command.h +++ 
b/contrib/mysql_proxy/filters/network/source/mysql_codec_command.h @@ -2,7 +2,8 @@ #include "envoy/buffer/buffer.h" #include "source/common/buffer/buffer_impl.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec.h" + +#include "contrib/mysql_proxy/filters/network/source/mysql_codec.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/mysql_proxy/mysql_codec_greeting.cc b/contrib/mysql_proxy/filters/network/source/mysql_codec_greeting.cc similarity index 97% rename from source/extensions/filters/network/mysql_proxy/mysql_codec_greeting.cc rename to contrib/mysql_proxy/filters/network/source/mysql_codec_greeting.cc index 88b2ba5f1ec1b..ab5550a05b510 100644 --- a/source/extensions/filters/network/mysql_proxy/mysql_codec_greeting.cc +++ b/contrib/mysql_proxy/filters/network/source/mysql_codec_greeting.cc @@ -1,9 +1,9 @@ -#include "source/extensions/filters/network/mysql_proxy/mysql_codec_greeting.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec_greeting.h" #include "envoy/buffer/buffer.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_utils.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_utils.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/mysql_proxy/mysql_codec_greeting.h b/contrib/mysql_proxy/filters/network/source/mysql_codec_greeting.h similarity index 97% rename from source/extensions/filters/network/mysql_proxy/mysql_codec_greeting.h rename to contrib/mysql_proxy/filters/network/source/mysql_codec_greeting.h index a507a5ec89398..11b5fec77bacc 100644 --- a/source/extensions/filters/network/mysql_proxy/mysql_codec_greeting.h +++ b/contrib/mysql_proxy/filters/network/source/mysql_codec_greeting.h @@ -1,6 +1,7 @@ #pragma once #include "source/common/buffer/buffer_impl.h" 
-#include "source/extensions/filters/network/mysql_proxy/mysql_codec.h" + +#include "contrib/mysql_proxy/filters/network/source/mysql_codec.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/mysql_proxy/mysql_codec_switch_resp.cc b/contrib/mysql_proxy/filters/network/source/mysql_codec_switch_resp.cc similarity index 77% rename from source/extensions/filters/network/mysql_proxy/mysql_codec_switch_resp.cc rename to contrib/mysql_proxy/filters/network/source/mysql_codec_switch_resp.cc index 120669c5f2883..d55d820ec9904 100644 --- a/source/extensions/filters/network/mysql_proxy/mysql_codec_switch_resp.cc +++ b/contrib/mysql_proxy/filters/network/source/mysql_codec_switch_resp.cc @@ -1,10 +1,11 @@ -#include "source/extensions/filters/network/mysql_proxy/mysql_codec_switch_resp.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec_switch_resp.h" #include "envoy/buffer/buffer.h" #include "source/common/common/logger.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_utils.h" + +#include "contrib/mysql_proxy/filters/network/source/mysql_codec.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_utils.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/mysql_proxy/mysql_codec_switch_resp.h b/contrib/mysql_proxy/filters/network/source/mysql_codec_switch_resp.h similarity index 91% rename from source/extensions/filters/network/mysql_proxy/mysql_codec_switch_resp.h rename to contrib/mysql_proxy/filters/network/source/mysql_codec_switch_resp.h index f6128191e3721..e649092f36ca2 100644 --- a/source/extensions/filters/network/mysql_proxy/mysql_codec_switch_resp.h +++ b/contrib/mysql_proxy/filters/network/source/mysql_codec_switch_resp.h @@ -1,7 +1,8 @@ #pragma once #include "source/common/buffer/buffer_impl.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec.h" + 
+#include "contrib/mysql_proxy/filters/network/source/mysql_codec.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/mysql_proxy/mysql_config.cc b/contrib/mysql_proxy/filters/network/source/mysql_config.cc similarity index 80% rename from source/extensions/filters/network/mysql_proxy/mysql_config.cc rename to contrib/mysql_proxy/filters/network/source/mysql_config.cc index 39a68f5f8d267..c5e3b32d66ec1 100644 --- a/source/extensions/filters/network/mysql_proxy/mysql_config.cc +++ b/contrib/mysql_proxy/filters/network/source/mysql_config.cc @@ -1,14 +1,15 @@ -#include "source/extensions/filters/network/mysql_proxy/mysql_config.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_config.h" #include -#include "envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.pb.h" -#include "envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.pb.validate.h" #include "envoy/registry/registry.h" #include "envoy/server/filter_config.h" #include "source/common/common/logger.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_filter.h" + +#include "contrib/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.pb.h" +#include "contrib/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.pb.validate.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_filter.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/mysql_proxy/mysql_config.h b/contrib/mysql_proxy/filters/network/source/mysql_config.h similarity index 77% rename from source/extensions/filters/network/mysql_proxy/mysql_config.h rename to contrib/mysql_proxy/filters/network/source/mysql_config.h index 66e459f901449..bd69b77585466 100644 --- a/source/extensions/filters/network/mysql_proxy/mysql_config.h +++ b/contrib/mysql_proxy/filters/network/source/mysql_config.h @@ -1,12 +1,12 @@ #pragma once -#include "envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.pb.h" -#include 
"envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.pb.validate.h" - #include "source/extensions/filters/network/common/factory_base.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_filter.h" #include "source/extensions/filters/network/well_known_names.h" +#include "contrib/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.pb.h" +#include "contrib/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.pb.validate.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_filter.h" + namespace Envoy { namespace Extensions { namespace NetworkFilters { diff --git a/source/extensions/filters/network/mysql_proxy/mysql_decoder.h b/contrib/mysql_proxy/filters/network/source/mysql_decoder.h similarity index 73% rename from source/extensions/filters/network/mysql_proxy/mysql_decoder.h rename to contrib/mysql_proxy/filters/network/source/mysql_decoder.h index 6b4f83a2c8cc6..491cb93a49d26 100644 --- a/source/extensions/filters/network/mysql_proxy/mysql_decoder.h +++ b/contrib/mysql_proxy/filters/network/source/mysql_decoder.h @@ -1,12 +1,12 @@ #pragma once -#include "source/extensions/common/sqlutils/sqlutils.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec_clogin.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec_clogin_resp.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec_command.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec_greeting.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec_switch_resp.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_session.h" +#include "contrib/common/sqlutils/source/sqlutils.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec_clogin.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec_clogin_resp.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec_command.h" +#include 
"contrib/mysql_proxy/filters/network/source/mysql_codec_greeting.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec_switch_resp.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_session.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/mysql_proxy/mysql_decoder_impl.cc b/contrib/mysql_proxy/filters/network/source/mysql_decoder_impl.cc similarity index 95% rename from source/extensions/filters/network/mysql_proxy/mysql_decoder_impl.cc rename to contrib/mysql_proxy/filters/network/source/mysql_decoder_impl.cc index b3006abb670d1..36d28b7688093 100644 --- a/source/extensions/filters/network/mysql_proxy/mysql_decoder_impl.cc +++ b/contrib/mysql_proxy/filters/network/source/mysql_decoder_impl.cc @@ -1,9 +1,10 @@ -#include "source/extensions/filters/network/mysql_proxy/mysql_decoder_impl.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_decoder_impl.h" #include "source/common/common/logger.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec_clogin_resp.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_utils.h" + +#include "contrib/mysql_proxy/filters/network/source/mysql_codec.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec_clogin_resp.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_utils.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/mysql_proxy/mysql_decoder_impl.h b/contrib/mysql_proxy/filters/network/source/mysql_decoder_impl.h similarity index 92% rename from source/extensions/filters/network/mysql_proxy/mysql_decoder_impl.h rename to contrib/mysql_proxy/filters/network/source/mysql_decoder_impl.h index ae787ba777c04..6c126edaeba1f 100644 --- a/source/extensions/filters/network/mysql_proxy/mysql_decoder_impl.h +++ b/contrib/mysql_proxy/filters/network/source/mysql_decoder_impl.h @@ -1,5 +1,5 @@ 
#pragma once -#include "source/extensions/filters/network/mysql_proxy/mysql_decoder.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_decoder.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/mysql_proxy/mysql_filter.cc b/contrib/mysql_proxy/filters/network/source/mysql_filter.cc similarity index 93% rename from source/extensions/filters/network/mysql_proxy/mysql_filter.cc rename to contrib/mysql_proxy/filters/network/source/mysql_filter.cc index 10af4580d3902..cc92cca92aa8a 100644 --- a/source/extensions/filters/network/mysql_proxy/mysql_filter.cc +++ b/contrib/mysql_proxy/filters/network/source/mysql_filter.cc @@ -1,15 +1,16 @@ -#include "source/extensions/filters/network/mysql_proxy/mysql_filter.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_filter.h" #include "envoy/config/core/v3/base.pb.h" #include "source/common/buffer/buffer_impl.h" #include "source/common/common/assert.h" #include "source/common/common/logger.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec_clogin_resp.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_decoder_impl.h" #include "source/extensions/filters/network/well_known_names.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec_clogin_resp.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_decoder_impl.h" + namespace Envoy { namespace Extensions { namespace NetworkFilters { diff --git a/source/extensions/filters/network/mysql_proxy/mysql_filter.h b/contrib/mysql_proxy/filters/network/source/mysql_filter.h similarity index 85% rename from source/extensions/filters/network/mysql_proxy/mysql_filter.h rename to contrib/mysql_proxy/filters/network/source/mysql_filter.h index 6b83b13770f0b..fc7d0b2c91475 100644 --- 
a/source/extensions/filters/network/mysql_proxy/mysql_filter.h +++ b/contrib/mysql_proxy/filters/network/source/mysql_filter.h @@ -8,14 +8,15 @@ #include "envoy/stats/stats_macros.h" #include "source/common/common/logger.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec_clogin.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec_clogin_resp.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec_command.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec_greeting.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec_switch_resp.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_decoder.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_session.h" + +#include "contrib/mysql_proxy/filters/network/source/mysql_codec.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec_clogin.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec_clogin_resp.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec_command.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec_greeting.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec_switch_resp.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_decoder.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_session.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/mysql_proxy/mysql_session.h b/contrib/mysql_proxy/filters/network/source/mysql_session.h similarity index 100% rename from source/extensions/filters/network/mysql_proxy/mysql_session.h rename to contrib/mysql_proxy/filters/network/source/mysql_session.h diff --git a/source/extensions/filters/network/mysql_proxy/mysql_utils.cc b/contrib/mysql_proxy/filters/network/source/mysql_utils.cc similarity index 98% rename from 
source/extensions/filters/network/mysql_proxy/mysql_utils.cc rename to contrib/mysql_proxy/filters/network/source/mysql_utils.cc index d2574cc94fbf7..c51fffd2076ce 100644 --- a/source/extensions/filters/network/mysql_proxy/mysql_utils.cc +++ b/contrib/mysql_proxy/filters/network/source/mysql_utils.cc @@ -1,4 +1,4 @@ -#include "source/extensions/filters/network/mysql_proxy/mysql_utils.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_utils.h" #include "envoy/common/exception.h" diff --git a/source/extensions/filters/network/mysql_proxy/mysql_utils.h b/contrib/mysql_proxy/filters/network/source/mysql_utils.h similarity index 97% rename from source/extensions/filters/network/mysql_proxy/mysql_utils.h rename to contrib/mysql_proxy/filters/network/source/mysql_utils.h index d8548eed24f01..254ce0f8edc81 100644 --- a/source/extensions/filters/network/mysql_proxy/mysql_utils.h +++ b/contrib/mysql_proxy/filters/network/source/mysql_utils.h @@ -6,7 +6,8 @@ #include "source/common/buffer/buffer_impl.h" #include "source/common/common/byte_order.h" #include "source/common/common/logger.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec.h" + +#include "contrib/mysql_proxy/filters/network/source/mysql_codec.h" namespace Envoy { namespace Extensions { diff --git a/test/extensions/filters/network/mysql_proxy/BUILD b/contrib/mysql_proxy/filters/network/test/BUILD similarity index 52% rename from test/extensions/filters/network/mysql_proxy/BUILD rename to contrib/mysql_proxy/filters/network/test/BUILD index 97710a2289ac1..1a7e65a434690 100644 --- a/test/extensions/filters/network/mysql_proxy/BUILD +++ b/contrib/mysql_proxy/filters/network/test/BUILD @@ -1,101 +1,91 @@ load( "//bazel:envoy_build_system.bzl", - "envoy_package", -) -load( - "//test/extensions:extensions_build_system.bzl", - "envoy_extension_cc_test", - "envoy_extension_cc_test_library", + "envoy_cc_test", + "envoy_cc_test_library", + "envoy_contrib_package", ) licenses(["notice"]) # 
Apache 2 -envoy_package() +envoy_contrib_package() -envoy_extension_cc_test_library( +envoy_cc_test_library( name = "mysql_test_utils_lib", srcs = ["mysql_test_utils.cc"], hdrs = ["mysql_test_utils.h"], - extension_names = ["envoy.filters.network.mysql_proxy"], deps = [ - "//source/extensions/filters/network/mysql_proxy:codec_lib", + "//contrib/mysql_proxy/filters/network/source:codec_lib", ], ) -envoy_extension_cc_test( +envoy_cc_test( name = "mysql_codec_tests", srcs = [ "mysql_codec_test.cc", ], - extension_names = ["envoy.filters.network.mysql_proxy"], deps = [ ":mysql_test_utils_lib", - "//source/extensions/filters/network/mysql_proxy:codec_lib", + "//contrib/mysql_proxy/filters/network/source:codec_lib", ], ) -envoy_extension_cc_test( +envoy_cc_test( name = "mysql_greet_tests", srcs = [ "mysql_greet_test.cc", ], - extension_names = ["envoy.filters.network.mysql_proxy"], deps = [ ":mysql_test_utils_lib", - "//source/extensions/filters/network/mysql_proxy:codec_lib", + "//contrib/mysql_proxy/filters/network/source:codec_lib", ], ) -envoy_extension_cc_test( +envoy_cc_test( name = "mysql_auth_switch_resp_tests", srcs = [ "mysql_auth_switch_resp_test.cc", ], - extension_names = ["envoy.filters.network.mysql_proxy"], deps = [ ":mysql_test_utils_lib", - "//source/extensions/filters/network/mysql_proxy:codec_lib", + "//contrib/mysql_proxy/filters/network/source:codec_lib", ], ) -envoy_extension_cc_test( +envoy_cc_test( name = "mysql_clogin_tests", srcs = [ "mysql_clogin_test.cc", ], - extension_names = ["envoy.filters.network.mysql_proxy"], deps = [ ":mysql_test_utils_lib", - "//source/extensions/filters/network/mysql_proxy:codec_lib", + "//contrib/mysql_proxy/filters/network/source:codec_lib", ], ) -envoy_extension_cc_test( +envoy_cc_test( name = "mysql_clogin_resp_tests", srcs = [ "mysql_clogin_resp_test.cc", ], - extension_names = ["envoy.filters.network.mysql_proxy"], deps = [ ":mysql_test_utils_lib", - "//source/extensions/filters/network/mysql_proxy:codec_lib", 
+ "//contrib/mysql_proxy/filters/network/source:codec_lib", ], ) -envoy_extension_cc_test( +envoy_cc_test( name = "mysql_filter_tests", srcs = [ "mysql_filter_test.cc", ], - extension_names = ["envoy.filters.network.mysql_proxy"], deps = [ ":mysql_test_utils_lib", - "//source/extensions/filters/network/mysql_proxy:config", + "//contrib/mysql_proxy/filters/network/source:config", "//test/mocks/network:network_mocks", ], ) -envoy_extension_cc_test( +envoy_cc_test( name = "mysql_integration_test", srcs = [ "mysql_integration_test.cc", @@ -103,30 +93,28 @@ envoy_extension_cc_test( data = [ "mysql_test_config.yaml", ], - extension_names = ["envoy.filters.network.mysql_proxy"], deps = [ ":mysql_test_utils_lib", + "//contrib/mysql_proxy/filters/network/source:config", + "//contrib/mysql_proxy/filters/network/source:filter_lib", "//source/common/tcp_proxy", - "//source/extensions/filters/network/mysql_proxy:config", - "//source/extensions/filters/network/mysql_proxy:filter_lib", "//source/extensions/filters/network/tcp_proxy:config", "//test/integration:integration_lib", ], ) -envoy_extension_cc_test( +envoy_cc_test( name = "mysql_command_tests", srcs = [ "mysql_command_test.cc", ], data = ["mysql_test_config.yaml"], - extension_names = ["envoy.filters.network.mysql_proxy"], external_deps = ["sqlparser"], deps = [ ":mysql_test_utils_lib", + "//contrib/mysql_proxy/filters/network/source:codec_lib", + "//contrib/mysql_proxy/filters/network/source:config", "//source/common/tcp_proxy", - "//source/extensions/filters/network/mysql_proxy:codec_lib", - "//source/extensions/filters/network/mysql_proxy:config", "//source/extensions/filters/network/tcp_proxy:config", "//test/integration:integration_lib", ], diff --git a/test/extensions/filters/network/mysql_proxy/mysql_auth_switch_resp_test.cc b/contrib/mysql_proxy/filters/network/test/mysql_auth_switch_resp_test.cc similarity index 93% rename from test/extensions/filters/network/mysql_proxy/mysql_auth_switch_resp_test.cc rename to 
contrib/mysql_proxy/filters/network/test/mysql_auth_switch_resp_test.cc index 7529af8de0c5e..97bee60e1314c 100644 --- a/test/extensions/filters/network/mysql_proxy/mysql_auth_switch_resp_test.cc +++ b/contrib/mysql_proxy/filters/network/test/mysql_auth_switch_resp_test.cc @@ -1,5 +1,4 @@ -#include "source/extensions/filters/network/mysql_proxy/mysql_codec_switch_resp.h" - +#include "contrib/mysql_proxy/filters/network/source/mysql_codec_switch_resp.h" #include "gtest/gtest.h" #include "mysql_test_utils.h" diff --git a/test/extensions/filters/network/mysql_proxy/mysql_clogin_resp_test.cc b/contrib/mysql_proxy/filters/network/test/mysql_clogin_resp_test.cc similarity index 99% rename from test/extensions/filters/network/mysql_proxy/mysql_clogin_resp_test.cc rename to contrib/mysql_proxy/filters/network/test/mysql_clogin_resp_test.cc index 60a064b5d46ca..1d518a6768a65 100644 --- a/test/extensions/filters/network/mysql_proxy/mysql_clogin_resp_test.cc +++ b/contrib/mysql_proxy/filters/network/test/mysql_clogin_resp_test.cc @@ -1,7 +1,7 @@ #include "source/common/buffer/buffer_impl.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec_clogin_resp.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec_clogin_resp.h" #include "gtest/gtest.h" #include "mysql_test_utils.h" diff --git a/test/extensions/filters/network/mysql_proxy/mysql_clogin_test.cc b/contrib/mysql_proxy/filters/network/test/mysql_clogin_test.cc similarity index 87% rename from test/extensions/filters/network/mysql_proxy/mysql_clogin_test.cc rename to contrib/mysql_proxy/filters/network/test/mysql_clogin_test.cc index d06aad773f1d2..3772e220eb265 100644 --- a/test/extensions/filters/network/mysql_proxy/mysql_clogin_test.cc +++ b/contrib/mysql_proxy/filters/network/test/mysql_clogin_test.cc @@ -1,7 +1,10 @@ +#include + #include 
"source/common/buffer/buffer_impl.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec_clogin.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_utils.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec_clogin.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_utils.h" #include "gtest/gtest.h" #include "mysql_test_utils.h" @@ -19,6 +22,7 @@ ClientLogin initClientLogin() { mysql_clogin_encode.setAuthResp(MySQLTestUtils::getAuthResp8()); mysql_clogin_encode.setDb(MySQLTestUtils::getDb()); mysql_clogin_encode.setAuthPluginName(MySQLTestUtils::getAuthPluginName()); + mysql_clogin_encode.addConnectionAttribute({"key", "val"}); return mysql_clogin_encode; } }; // namespace @@ -188,8 +192,8 @@ TEST_F(MySQLCLoginTest, MySQLClientLogin41IncompleteAuthResp) { * - message is decoded using the ClientLogin class */ TEST_F(MySQLCLoginTest, MySQLClientLogin41EncDec) { - ClientLogin& mysql_clogin_encode = - MySQLCLoginTest::getClientLogin(CLIENT_PROTOCOL_41 | CLIENT_CONNECT_WITH_DB); + ClientLogin& mysql_clogin_encode = MySQLCLoginTest::getClientLogin( + CLIENT_PROTOCOL_41 | CLIENT_CONNECT_WITH_DB | CLIENT_CONNECT_ATTRS); Buffer::OwnedImpl decode_data; mysql_clogin_encode.encode(decode_data); @@ -204,7 +208,8 @@ TEST_F(MySQLCLoginTest, MySQLClientLogin41EncDec) { EXPECT_EQ(mysql_clogin_decode.getCharset(), mysql_clogin_encode.getCharset()); EXPECT_EQ(mysql_clogin_decode.getUsername(), mysql_clogin_encode.getUsername()); EXPECT_EQ(mysql_clogin_decode.getAuthResp(), mysql_clogin_encode.getAuthResp()); - + EXPECT_EQ(mysql_clogin_decode.getConnectionAttribute(), + mysql_clogin_encode.getConnectionAttribute()); EXPECT_TRUE(mysql_clogin_decode.getAuthPluginName().empty()); } @@ -553,6 +558,110 @@ TEST_F(MySQLCLoginTest, MySQLClientLogin41IncompleteAuthPluginName) { EXPECT_EQ(mysql_clogin_decode.getAuthPluginName(), ""); } +class MySQL41LoginConnAttrTest : 
public MySQLCLoginTest { +public: + MySQL41LoginConnAttrTest() { + login_encode_ = MySQLCLoginTest::getClientLogin(CLIENT_PROTOCOL_41 | CLIENT_CONNECT_WITH_DB | + CLIENT_PLUGIN_AUTH | CLIENT_CONNECT_ATTRS); + incomplete_base_len_ = + sizeof(login_encode_.getClientCap()) + sizeof(login_encode_.getMaxPacket()) + + sizeof(login_encode_.getCharset()) + UNSET_BYTES + login_encode_.getUsername().size() + 1 + + login_encode_.getAuthResp().size() + 1 + login_encode_.getDb().size() + 1 + + login_encode_.getAuthPluginName().length() + 1; + } + + void prepareLoginDecode(int delta_len = 0) { + Buffer::OwnedImpl buffer; + login_encode_.encode(buffer); + int incomplete_len = incomplete_base_len_ + delta_len; + Buffer::OwnedImpl decode_data(buffer.toString().data(), incomplete_len); + + login_decode_.decode(decode_data, CHALLENGE_SEQ_NUM, decode_data.length()); + } + + void checkLoginDecode(const std::function& additional_check = nullptr) { + EXPECT_TRUE(login_decode_.isConnectWithDb()); + EXPECT_EQ(login_decode_.getClientCap(), login_encode_.getClientCap()); + EXPECT_EQ(login_decode_.getExtendedClientCap(), login_decode_.getExtendedClientCap()); + EXPECT_EQ(login_decode_.getMaxPacket(), login_encode_.getMaxPacket()); + EXPECT_EQ(login_decode_.getCharset(), login_encode_.getCharset()); + EXPECT_EQ(login_decode_.getUsername(), login_encode_.getUsername()); + EXPECT_EQ(login_decode_.getAuthResp(), login_encode_.getAuthResp()); + EXPECT_EQ(login_decode_.getDb(), login_encode_.getDb()); + EXPECT_EQ(login_decode_.getAuthPluginName(), login_encode_.getAuthPluginName()); + if (additional_check != nullptr) { + additional_check(); + } + } + const ClientLogin& loginEncode() const { return login_encode_; } + const ClientLogin& loginDecode() const { return login_decode_; } + +private: + ClientLogin login_encode_; + ClientLogin login_decode_; + int incomplete_base_len_; +}; + +/* + * Negative Test the MYSQL Client Login 41 message parser: + * Incomplete total length of connection attributions 
+ */ +TEST_F(MySQL41LoginConnAttrTest, MySQLClientLogin41IncompleteConnAttrLength) { + prepareLoginDecode(); + checkLoginDecode([&]() { EXPECT_EQ(loginDecode().getConnectionAttribute().size(), 0); }); +} + +/* + * Negative Test the MYSQL Client Login 41 message parser: + * Incomplete length of connection attribution key + */ +TEST_F(MySQL41LoginConnAttrTest, MySQLClientLogin41IncompleteConnAttrKeyLength) { + prepareLoginDecode( + MySQLTestUtils::bytesOfConnAtrributeLength(loginEncode().getConnectionAttribute())); + + checkLoginDecode([&]() { EXPECT_EQ(loginDecode().getConnectionAttribute().size(), 0); }); +} + +/* + * Negative Test the MYSQL Client Login 41 message parser: + * Incomplete connection attribution key + */ +TEST_F(MySQL41LoginConnAttrTest, MySQLClientLogin41IncompleteConnAttrKey) { + prepareLoginDecode( + MySQLTestUtils::bytesOfConnAtrributeLength(loginEncode().getConnectionAttribute()) + + MySQLTestUtils::sizeOfLengthEncodeInteger( + loginEncode().getConnectionAttribute()[0].first.length())); + checkLoginDecode([&]() { EXPECT_EQ(loginDecode().getConnectionAttribute().size(), 0); }); +} + +/* + * Negative Test the MYSQL Client Login 41 message parser: + * Incomplete length of connection attribution val + */ +TEST_F(MySQL41LoginConnAttrTest, MySQLClientLogin41IncompleteConnAttrValLength) { + prepareLoginDecode( + MySQLTestUtils::bytesOfConnAtrributeLength(loginEncode().getConnectionAttribute()) + + MySQLTestUtils::sizeOfLengthEncodeInteger( + loginEncode().getConnectionAttribute()[0].first.length()) + + loginEncode().getConnectionAttribute()[0].first.length()); + checkLoginDecode([&]() { EXPECT_EQ(loginDecode().getConnectionAttribute().size(), 0); }); +} + +/* + * Negative Test the MYSQL Client Login 41 message parser: + * Incomplete connection attribution val + */ +TEST_F(MySQL41LoginConnAttrTest, MySQLClientLogin41IncompleteConnAttrVal) { + prepareLoginDecode( + MySQLTestUtils::bytesOfConnAtrributeLength(loginEncode().getConnectionAttribute()) + + 
MySQLTestUtils::sizeOfLengthEncodeInteger( + loginEncode().getConnectionAttribute()[0].first.length()) + + loginEncode().getConnectionAttribute()[0].first.length() + + MySQLTestUtils::sizeOfLengthEncodeInteger( + loginEncode().getConnectionAttribute()[0].second.length())); + checkLoginDecode([&]() { EXPECT_EQ(loginDecode().getConnectionAttribute().size(), 0); }); +} + /* * Negative Test the MYSQL Client 320 login message parser: * Incomplete header at cap diff --git a/test/extensions/filters/network/mysql_proxy/mysql_codec_test.cc b/contrib/mysql_proxy/filters/network/test/mysql_codec_test.cc similarity index 95% rename from test/extensions/filters/network/mysql_proxy/mysql_codec_test.cc rename to contrib/mysql_proxy/filters/network/test/mysql_codec_test.cc index 3d1add37faf0c..cec25deb66ef5 100644 --- a/test/extensions/filters/network/mysql_proxy/mysql_codec_test.cc +++ b/contrib/mysql_proxy/filters/network/test/mysql_codec_test.cc @@ -1,7 +1,7 @@ #include "source/common/buffer/buffer_impl.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec_command.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_utils.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec_command.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_utils.h" #include "gtest/gtest.h" #include "mysql_test_utils.h" diff --git a/test/extensions/filters/network/mysql_proxy/mysql_command_test.cc b/contrib/mysql_proxy/filters/network/test/mysql_command_test.cc similarity index 98% rename from test/extensions/filters/network/mysql_proxy/mysql_command_test.cc rename to contrib/mysql_proxy/filters/network/test/mysql_command_test.cc index 906d6f2b55303..02d4c9fa3ea8a 100644 --- a/test/extensions/filters/network/mysql_proxy/mysql_command_test.cc +++ b/contrib/mysql_proxy/filters/network/test/mysql_command_test.cc @@ -1,11 +1,11 @@ #include "source/common/buffer/buffer_impl.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec.h" 
-#include "source/extensions/filters/network/mysql_proxy/mysql_codec_clogin.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec_clogin_resp.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec_command.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec_greeting.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_utils.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec_clogin.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec_clogin_resp.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec_command.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec_greeting.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_utils.h" #include "gmock/gmock.h" #include "gtest/gtest.h" #include "include/sqlparser/SQLParser.h" diff --git a/test/extensions/filters/network/mysql_proxy/mysql_filter_test.cc b/contrib/mysql_proxy/filters/network/test/mysql_filter_test.cc similarity index 99% rename from test/extensions/filters/network/mysql_proxy/mysql_filter_test.cc rename to contrib/mysql_proxy/filters/network/test/mysql_filter_test.cc index 85756cf59e724..89886c048ff47 100644 --- a/test/extensions/filters/network/mysql_proxy/mysql_filter_test.cc +++ b/contrib/mysql_proxy/filters/network/test/mysql_filter_test.cc @@ -1,10 +1,10 @@ #include "source/common/buffer/buffer_impl.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_filter.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_utils.h" #include "test/mocks/network/mocks.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_filter.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_utils.h" #include "gmock/gmock.h" #include 
"gtest/gtest.h" #include "mysql_test_utils.h" diff --git a/test/extensions/filters/network/mysql_proxy/mysql_greet_test.cc b/contrib/mysql_proxy/filters/network/test/mysql_greet_test.cc similarity index 99% rename from test/extensions/filters/network/mysql_proxy/mysql_greet_test.cc rename to contrib/mysql_proxy/filters/network/test/mysql_greet_test.cc index 185198aa85875..2e3c4a8fcd51b 100644 --- a/test/extensions/filters/network/mysql_proxy/mysql_greet_test.cc +++ b/contrib/mysql_proxy/filters/network/test/mysql_greet_test.cc @@ -1,7 +1,7 @@ #include "source/common/buffer/buffer_impl.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec_greeting.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec_greeting.h" #include "gtest/gtest.h" #include "mysql_test_utils.h" diff --git a/test/extensions/filters/network/mysql_proxy/mysql_integration_test.cc b/contrib/mysql_proxy/filters/network/test/mysql_integration_test.cc similarity index 95% rename from test/extensions/filters/network/mysql_proxy/mysql_integration_test.cc rename to contrib/mysql_proxy/filters/network/test/mysql_integration_test.cc index f2799c5fb2308..c44b165d03eb2 100644 --- a/test/extensions/filters/network/mysql_proxy/mysql_integration_test.cc +++ b/contrib/mysql_proxy/filters/network/test/mysql_integration_test.cc @@ -1,15 +1,14 @@ -#include "source/extensions/filters/network/mysql_proxy/mysql_codec.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec_clogin.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec_clogin_resp.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec_greeting.h" -#include "source/extensions/filters/network/mysql_proxy/mysql_codec_switch_resp.h" - #include "test/integration/fake_upstream.h" #include "test/integration/integration.h" #include 
"test/integration/utility.h" #include "test/mocks/network/mocks.h" #include "test/test_common/network_utility.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec_clogin.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec_clogin_resp.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec_greeting.h" +#include "contrib/mysql_proxy/filters/network/source/mysql_codec_switch_resp.h" #include "gmock/gmock.h" #include "gtest/gtest.h" #include "mysql_test_utils.h" @@ -26,7 +25,7 @@ class MySQLIntegrationTest : public testing::TestWithParam>& conn_attrs) { + int64_t total_len = 0; + for (const auto& kv : conn_attrs) { + total_len += sizeOfLengthEncodeInteger(kv.first.length()); + total_len += kv.first.length(); + total_len += sizeOfLengthEncodeInteger(kv.second.length()); + total_len += kv.second.length(); + } + return sizeOfLengthEncodeInteger(total_len); +} + int MySQLTestUtils::sizeOfLengthEncodeInteger(uint64_t val) { if (val < 251) { return sizeof(uint8_t); diff --git a/test/extensions/filters/network/mysql_proxy/mysql_test_utils.h b/contrib/mysql_proxy/filters/network/test/mysql_test_utils.h similarity index 92% rename from test/extensions/filters/network/mysql_proxy/mysql_test_utils.h rename to contrib/mysql_proxy/filters/network/test/mysql_test_utils.h index 996cc92ff918c..40ad4ed7dc802 100644 --- a/test/extensions/filters/network/mysql_proxy/mysql_test_utils.h +++ b/contrib/mysql_proxy/filters/network/test/mysql_test_utils.h @@ -1,6 +1,5 @@ #pragma once -#include "source/extensions/filters/network/mysql_proxy/mysql_codec.h" - +#include "contrib/mysql_proxy/filters/network/source/mysql_codec.h" #include "fmt/format.h" namespace Envoy { @@ -37,6 +36,8 @@ class MySQLTestUtils { static std::string getDb() { return "mysql.db"; } static std::string getCommandResponse() { return "command response"; } static std::string getInfo() { return "info"; } + static 
int + bytesOfConnAtrributeLength(const std::vector>& conn); static int sizeOfLengthEncodeInteger(uint64_t val); std::string encodeServerGreeting(int protocol); diff --git a/source/extensions/filters/network/postgres_proxy/BUILD b/contrib/postgres_proxy/filters/network/source/BUILD similarity index 81% rename from source/extensions/filters/network/postgres_proxy/BUILD rename to contrib/postgres_proxy/filters/network/source/BUILD index 89c26a2cf94fa..9b0c71cd28989 100644 --- a/source/extensions/filters/network/postgres_proxy/BUILD +++ b/contrib/postgres_proxy/filters/network/source/BUILD @@ -1,19 +1,19 @@ load( "//bazel:envoy_build_system.bzl", - "envoy_cc_extension", + "envoy_cc_contrib_extension", "envoy_cc_library", - "envoy_extension_package", + "envoy_contrib_package", ) licenses(["notice"]) # Apache 2 +envoy_contrib_package() + #package(default_visibility = ["//visibility:public"]) # PostgresSQL proxy L7 network filter. # Public docs: docs/root/configuration/network_filters/postgres_proxy_filter.rst -envoy_extension_package() - envoy_cc_library( name = "filter", srcs = [ @@ -29,18 +29,18 @@ envoy_cc_library( ], repository = "@envoy", deps = [ + "//contrib/common/sqlutils/source:sqlutils_lib", "//envoy/network:filter_interface", "//envoy/server:filter_config_interface", "//envoy/stats:stats_interface", "//envoy/stats:stats_macros", "//source/common/buffer:buffer_lib", "//source/common/network:filter_lib", - "//source/extensions/common/sqlutils:sqlutils_lib", "//source/extensions/filters/network:well_known_names", ], ) -envoy_cc_extension( +envoy_cc_contrib_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], @@ -49,6 +49,6 @@ envoy_cc_extension( ":filter", "//source/extensions/filters/network:well_known_names", "//source/extensions/filters/network/common:factory_base_lib", - "@envoy_api//envoy/extensions/filters/network/postgres_proxy/v3alpha:pkg_cc_proto", + 
"@envoy_api//contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/network/postgres_proxy/config.cc b/contrib/postgres_proxy/filters/network/source/config.cc similarity index 95% rename from source/extensions/filters/network/postgres_proxy/config.cc rename to contrib/postgres_proxy/filters/network/source/config.cc index a1e148da6396b..ff19cbe37c0b2 100644 --- a/source/extensions/filters/network/postgres_proxy/config.cc +++ b/contrib/postgres_proxy/filters/network/source/config.cc @@ -1,4 +1,4 @@ -#include "source/extensions/filters/network/postgres_proxy/config.h" +#include "contrib/postgres_proxy/filters/network/source/config.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/postgres_proxy/config.h b/contrib/postgres_proxy/filters/network/source/config.h similarity index 76% rename from source/extensions/filters/network/postgres_proxy/config.h rename to contrib/postgres_proxy/filters/network/source/config.h index 7edb63852575e..9a1fad8b1d311 100644 --- a/source/extensions/filters/network/postgres_proxy/config.h +++ b/contrib/postgres_proxy/filters/network/source/config.h @@ -1,12 +1,12 @@ #pragma once -#include "envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.pb.h" -#include "envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.pb.validate.h" - #include "source/extensions/filters/network/common/factory_base.h" -#include "source/extensions/filters/network/postgres_proxy/postgres_filter.h" #include "source/extensions/filters/network/well_known_names.h" +#include "contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.pb.h" +#include "contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.pb.validate.h" +#include "contrib/postgres_proxy/filters/network/source/postgres_filter.h" + namespace Envoy { namespace Extensions { namespace NetworkFilters { diff --git 
a/source/extensions/filters/network/postgres_proxy/postgres_decoder.cc b/contrib/postgres_proxy/filters/network/source/postgres_decoder.cc similarity index 99% rename from source/extensions/filters/network/postgres_proxy/postgres_decoder.cc rename to contrib/postgres_proxy/filters/network/source/postgres_decoder.cc index 793bd96f32d34..748275461da0d 100644 --- a/source/extensions/filters/network/postgres_proxy/postgres_decoder.cc +++ b/contrib/postgres_proxy/filters/network/source/postgres_decoder.cc @@ -1,4 +1,4 @@ -#include "source/extensions/filters/network/postgres_proxy/postgres_decoder.h" +#include "contrib/postgres_proxy/filters/network/source/postgres_decoder.h" #include diff --git a/source/extensions/filters/network/postgres_proxy/postgres_decoder.h b/contrib/postgres_proxy/filters/network/source/postgres_decoder.h similarity index 97% rename from source/extensions/filters/network/postgres_proxy/postgres_decoder.h rename to contrib/postgres_proxy/filters/network/source/postgres_decoder.h index f62de0108f574..2d4dffcb41e4c 100644 --- a/source/extensions/filters/network/postgres_proxy/postgres_decoder.h +++ b/contrib/postgres_proxy/filters/network/source/postgres_decoder.h @@ -5,11 +5,11 @@ #include "source/common/buffer/buffer_impl.h" #include "source/common/common/logger.h" -#include "source/extensions/common/sqlutils/sqlutils.h" -#include "source/extensions/filters/network/postgres_proxy/postgres_message.h" -#include "source/extensions/filters/network/postgres_proxy/postgres_session.h" #include "absl/container/flat_hash_map.h" +#include "contrib/common/sqlutils/source/sqlutils.h" +#include "contrib/postgres_proxy/filters/network/source/postgres_message.h" +#include "contrib/postgres_proxy/filters/network/source/postgres_session.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/postgres_proxy/postgres_filter.cc b/contrib/postgres_proxy/filters/network/source/postgres_filter.cc similarity index 98% rename from 
source/extensions/filters/network/postgres_proxy/postgres_filter.cc rename to contrib/postgres_proxy/filters/network/source/postgres_filter.cc index 0e8daac36fed4..6255c2ba5917a 100644 --- a/source/extensions/filters/network/postgres_proxy/postgres_filter.cc +++ b/contrib/postgres_proxy/filters/network/source/postgres_filter.cc @@ -1,11 +1,12 @@ -#include "source/extensions/filters/network/postgres_proxy/postgres_filter.h" +#include "contrib/postgres_proxy/filters/network/source/postgres_filter.h" #include "envoy/buffer/buffer.h" #include "envoy/network/connection.h" -#include "source/extensions/filters/network/postgres_proxy/postgres_decoder.h" #include "source/extensions/filters/network/well_known_names.h" +#include "contrib/postgres_proxy/filters/network/source/postgres_decoder.h" + namespace Envoy { namespace Extensions { namespace NetworkFilters { diff --git a/source/extensions/filters/network/postgres_proxy/postgres_filter.h b/contrib/postgres_proxy/filters/network/source/postgres_filter.h similarity index 98% rename from source/extensions/filters/network/postgres_proxy/postgres_filter.h rename to contrib/postgres_proxy/filters/network/source/postgres_filter.h index cb89499d47c4f..dbc63686e8704 100644 --- a/source/extensions/filters/network/postgres_proxy/postgres_filter.h +++ b/contrib/postgres_proxy/filters/network/source/postgres_filter.h @@ -7,7 +7,8 @@ #include "source/common/buffer/buffer_impl.h" #include "source/common/common/logger.h" -#include "source/extensions/filters/network/postgres_proxy/postgres_decoder.h" + +#include "contrib/postgres_proxy/filters/network/source/postgres_decoder.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/postgres_proxy/postgres_message.cc b/contrib/postgres_proxy/filters/network/source/postgres_message.cc similarity index 98% rename from source/extensions/filters/network/postgres_proxy/postgres_message.cc rename to 
contrib/postgres_proxy/filters/network/source/postgres_message.cc index b8e4a3d5febe5..91595b8f00566 100644 --- a/source/extensions/filters/network/postgres_proxy/postgres_message.cc +++ b/contrib/postgres_proxy/filters/network/source/postgres_message.cc @@ -1,4 +1,4 @@ -#include "source/extensions/filters/network/postgres_proxy/postgres_message.h" +#include "contrib/postgres_proxy/filters/network/source/postgres_message.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/postgres_proxy/postgres_message.h b/contrib/postgres_proxy/filters/network/source/postgres_message.h similarity index 100% rename from source/extensions/filters/network/postgres_proxy/postgres_message.h rename to contrib/postgres_proxy/filters/network/source/postgres_message.h diff --git a/source/extensions/filters/network/postgres_proxy/postgres_session.h b/contrib/postgres_proxy/filters/network/source/postgres_session.h similarity index 100% rename from source/extensions/filters/network/postgres_proxy/postgres_session.h rename to contrib/postgres_proxy/filters/network/source/postgres_session.h diff --git a/test/extensions/filters/network/postgres_proxy/BUILD b/contrib/postgres_proxy/filters/network/test/BUILD similarity index 52% rename from test/extensions/filters/network/postgres_proxy/BUILD rename to contrib/postgres_proxy/filters/network/test/BUILD index 2d5c9c22ee728..496f8ec416e5b 100644 --- a/test/extensions/filters/network/postgres_proxy/BUILD +++ b/contrib/postgres_proxy/filters/network/test/BUILD @@ -1,67 +1,60 @@ load( "//bazel:envoy_build_system.bzl", - "envoy_package", -) -load( - "//test/extensions:extensions_build_system.bzl", - "envoy_extension_cc_test", - "envoy_extension_cc_test_library", + "envoy_cc_test", + "envoy_cc_test_library", + "envoy_contrib_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_contrib_package() -envoy_extension_cc_test_library( +envoy_cc_test_library( name = "postgres_test_utils_lib", srcs = 
["postgres_test_utils.cc"], hdrs = ["postgres_test_utils.h"], - extension_names = ["envoy.filters.network.postgres_proxy"], deps = [ + "//contrib/postgres_proxy/filters/network/source:filter", "//source/common/buffer:buffer_lib", - "//source/extensions/filters/network/postgres_proxy:filter", ], ) -envoy_extension_cc_test( +envoy_cc_test( name = "postgres_decoder_tests", srcs = [ "postgres_decoder_test.cc", ], - extension_names = ["envoy.filters.network.postgres_proxy"], deps = [ ":postgres_test_utils_lib", - "//source/extensions/filters/network/postgres_proxy:filter", + "//contrib/postgres_proxy/filters/network/source:filter", "//test/mocks/network:network_mocks", ], ) -envoy_extension_cc_test( +envoy_cc_test( name = "postgres_message_tests", srcs = [ "postgres_message_test.cc", ], - extension_names = ["envoy.filters.network.postgres_proxy"], deps = [ + "//contrib/postgres_proxy/filters/network/source:filter", "//source/common/buffer:buffer_lib", - "//source/extensions/filters/network/postgres_proxy:filter", ], ) -envoy_extension_cc_test( +envoy_cc_test( name = "postgres_filter_tests", srcs = [ "postgres_filter_test.cc", ], - extension_names = ["envoy.filters.network.postgres_proxy"], deps = [ ":postgres_test_utils_lib", - "//source/extensions/filters/network/postgres_proxy:filter", + "//contrib/postgres_proxy/filters/network/source:filter", "//test/mocks/network:network_mocks", ], ) -envoy_extension_cc_test( +envoy_cc_test( name = "postgres_integration_test", srcs = [ "postgres_integration_test.cc", @@ -70,14 +63,13 @@ envoy_extension_cc_test( "postgres_test_config.yaml", "//test/config/integration/certs", ], - extension_names = ["envoy.filters.network.postgres_proxy"], deps = [ + "//contrib/postgres_proxy/filters/network/source:config", + "//contrib/postgres_proxy/filters/network/source:filter", "//source/common/tcp_proxy", - "//source/extensions/filters/network/postgres_proxy:config", - "//source/extensions/filters/network/postgres_proxy:filter", 
"//source/extensions/filters/network/tcp_proxy:config", "//source/extensions/transport_sockets/starttls:config", "//test/integration:integration_lib", - "@envoy_api//envoy/extensions/filters/network/postgres_proxy/v3alpha:pkg_cc_proto", + "@envoy_api//contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/network/postgres_proxy/postgres_decoder_test.cc b/contrib/postgres_proxy/filters/network/test/postgres_decoder_test.cc similarity index 99% rename from test/extensions/filters/network/postgres_proxy/postgres_decoder_test.cc rename to contrib/postgres_proxy/filters/network/test/postgres_decoder_test.cc index 3c6e05bf9f1fd..b143c1db25735 100644 --- a/test/extensions/filters/network/postgres_proxy/postgres_decoder_test.cc +++ b/contrib/postgres_proxy/filters/network/test/postgres_decoder_test.cc @@ -1,9 +1,8 @@ #include #include -#include "source/extensions/filters/network/postgres_proxy/postgres_decoder.h" - -#include "test/extensions/filters/network/postgres_proxy/postgres_test_utils.h" +#include "contrib/postgres_proxy/filters/network/source/postgres_decoder.h" +#include "contrib/postgres_proxy/filters/network/test/postgres_test_utils.h" namespace Envoy { namespace Extensions { diff --git a/test/extensions/filters/network/postgres_proxy/postgres_filter_test.cc b/contrib/postgres_proxy/filters/network/test/postgres_filter_test.cc similarity index 99% rename from test/extensions/filters/network/postgres_proxy/postgres_filter_test.cc rename to contrib/postgres_proxy/filters/network/test/postgres_filter_test.cc index 6564883727602..3a081ce55b688 100644 --- a/test/extensions/filters/network/postgres_proxy/postgres_filter_test.cc +++ b/contrib/postgres_proxy/filters/network/test/postgres_filter_test.cc @@ -3,12 +3,13 @@ #include -#include "source/extensions/filters/network/postgres_proxy/postgres_filter.h" #include "source/extensions/filters/network/well_known_names.h" -#include 
"test/extensions/filters/network/postgres_proxy/postgres_test_utils.h" #include "test/mocks/network/mocks.h" +#include "contrib/postgres_proxy/filters/network/source/postgres_filter.h" +#include "contrib/postgres_proxy/filters/network/test/postgres_test_utils.h" + namespace Envoy { namespace Extensions { namespace NetworkFilters { diff --git a/test/extensions/filters/network/postgres_proxy/postgres_integration_test.cc b/contrib/postgres_proxy/filters/network/test/postgres_integration_test.cc similarity index 96% rename from test/extensions/filters/network/postgres_proxy/postgres_integration_test.cc rename to contrib/postgres_proxy/filters/network/test/postgres_integration_test.cc index c9308517ea8db..258410172a326 100644 --- a/test/extensions/filters/network/postgres_proxy/postgres_integration_test.cc +++ b/contrib/postgres_proxy/filters/network/test/postgres_integration_test.cc @@ -1,12 +1,11 @@ -#include "envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.pb.h" -#include "envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.pb.validate.h" - #include "test/integration/fake_upstream.h" #include "test/integration/integration.h" #include "test/integration/utility.h" #include "test/mocks/network/mocks.h" #include "test/test_common/network_utility.h" +#include "contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.pb.h" +#include "contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.pb.validate.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -21,7 +20,7 @@ class PostgresBaseIntegrationTest : public testing::TestWithParam #include "source/common/buffer/buffer_impl.h" -#include "source/extensions/filters/network/postgres_proxy/postgres_message.h" +#include "contrib/postgres_proxy/filters/network/source/postgres_message.h" #include "fmt/printf.h" namespace Envoy { diff --git a/test/extensions/filters/network/postgres_proxy/postgres_test_config.yaml 
b/contrib/postgres_proxy/filters/network/test/postgres_test_config.yaml similarity index 100% rename from test/extensions/filters/network/postgres_proxy/postgres_test_config.yaml rename to contrib/postgres_proxy/filters/network/test/postgres_test_config.yaml diff --git a/test/extensions/filters/network/postgres_proxy/postgres_test_utils.cc b/contrib/postgres_proxy/filters/network/test/postgres_test_utils.cc similarity index 87% rename from test/extensions/filters/network/postgres_proxy/postgres_test_utils.cc rename to contrib/postgres_proxy/filters/network/test/postgres_test_utils.cc index 56450f5c5fe98..8793acf3b8151 100644 --- a/test/extensions/filters/network/postgres_proxy/postgres_test_utils.cc +++ b/contrib/postgres_proxy/filters/network/test/postgres_test_utils.cc @@ -1,4 +1,4 @@ -#include "test/extensions/filters/network/postgres_proxy/postgres_test_utils.h" +#include "contrib/postgres_proxy/filters/network/test/postgres_test_utils.h" namespace Envoy { namespace Extensions { diff --git a/test/extensions/filters/network/postgres_proxy/postgres_test_utils.h b/contrib/postgres_proxy/filters/network/test/postgres_test_utils.h similarity index 100% rename from test/extensions/filters/network/postgres_proxy/postgres_test_utils.h rename to contrib/postgres_proxy/filters/network/test/postgres_test_utils.h diff --git a/source/extensions/filters/network/rocketmq_proxy/BUILD b/contrib/rocketmq_proxy/filters/network/source/BUILD similarity index 84% rename from source/extensions/filters/network/rocketmq_proxy/BUILD rename to contrib/rocketmq_proxy/filters/network/source/BUILD index f9ea690d242d8..b15d7db7e41ba 100644 --- a/source/extensions/filters/network/rocketmq_proxy/BUILD +++ b/contrib/rocketmq_proxy/filters/network/source/BUILD @@ -1,13 +1,13 @@ load( "//bazel:envoy_build_system.bzl", - "envoy_cc_extension", + "envoy_cc_contrib_extension", "envoy_cc_library", - "envoy_extension_package", + "envoy_contrib_package", ) licenses(["notice"]) # Apache 2 
-envoy_extension_package() +envoy_contrib_package() envoy_cc_library( name = "well_known_names", @@ -95,6 +95,7 @@ envoy_cc_library( ":rocketmq_lib", ":stats_interface", ":well_known_names", + "//contrib/rocketmq_proxy/filters/network/source/router:router_interface", "//envoy/buffer:buffer_interface", "//envoy/event:dispatcher_interface", "//envoy/network:connection_interface", @@ -109,12 +110,11 @@ envoy_cc_library( "//source/common/stats:timespan_lib", "//source/common/upstream:load_balancer_lib", "//source/extensions/filters/network:well_known_names", - "//source/extensions/filters/network/rocketmq_proxy/router:router_interface", - "@envoy_api//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg_cc_proto", + "@envoy_api//contrib/envoy/extensions/filters/network/rocketmq_proxy/v3:pkg_cc_proto", ], ) -envoy_cc_extension( +envoy_cc_contrib_extension( name = "config", srcs = [ "config.cc", @@ -124,15 +124,15 @@ envoy_cc_extension( ], deps = [ ":conn_manager_lib", + "//contrib/rocketmq_proxy/filters/network/source/router:route_matcher", + "//contrib/rocketmq_proxy/filters/network/source/router:router_lib", "//envoy/registry", "//envoy/server:filter_config_interface", "//source/common/common:logger_lib", "//source/common/common:minimal_logger_lib", "//source/common/config:utility_lib", "//source/extensions/filters/network/common:factory_base_lib", - "//source/extensions/filters/network/rocketmq_proxy/router:route_matcher", - "//source/extensions/filters/network/rocketmq_proxy/router:router_lib", - "@envoy_api//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg_cc_proto", + "@envoy_api//contrib/envoy/extensions/filters/network/rocketmq_proxy/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/network/rocketmq_proxy/active_message.cc b/contrib/rocketmq_proxy/filters/network/source/active_message.cc similarity index 97% rename from source/extensions/filters/network/rocketmq_proxy/active_message.cc rename to 
contrib/rocketmq_proxy/filters/network/source/active_message.cc index d510ccadfebe7..15e0f505e3f4e 100644 --- a/source/extensions/filters/network/rocketmq_proxy/active_message.cc +++ b/contrib/rocketmq_proxy/filters/network/source/active_message.cc @@ -1,16 +1,16 @@ -#include "source/extensions/filters/network/rocketmq_proxy/active_message.h" +#include "contrib/rocketmq_proxy/filters/network/source/active_message.h" #include "envoy/upstream/cluster_manager.h" #include "source/common/common/empty_string.h" #include "source/common/common/enum_to_int.h" #include "source/common/protobuf/utility.h" -#include "source/extensions/filters/network/rocketmq_proxy/conn_manager.h" -#include "source/extensions/filters/network/rocketmq_proxy/topic_route.h" -#include "source/extensions/filters/network/rocketmq_proxy/well_known_names.h" #include "source/extensions/filters/network/well_known_names.h" #include "absl/strings/match.h" +#include "contrib/rocketmq_proxy/filters/network/source/conn_manager.h" +#include "contrib/rocketmq_proxy/filters/network/source/topic_route.h" +#include "contrib/rocketmq_proxy/filters/network/source/well_known_names.h" using Envoy::Tcp::ConnectionPool::ConnectionDataPtr; diff --git a/source/extensions/filters/network/rocketmq_proxy/active_message.h b/contrib/rocketmq_proxy/filters/network/source/active_message.h similarity index 92% rename from source/extensions/filters/network/rocketmq_proxy/active_message.h rename to contrib/rocketmq_proxy/filters/network/source/active_message.h index cbec9f1071280..bdf5d594181a6 100644 --- a/source/extensions/filters/network/rocketmq_proxy/active_message.h +++ b/contrib/rocketmq_proxy/filters/network/source/active_message.h @@ -8,12 +8,12 @@ #include "source/common/buffer/buffer_impl.h" #include "source/common/common/linked_object.h" #include "source/common/common/logger.h" -#include "source/extensions/filters/network/rocketmq_proxy/codec.h" -#include "source/extensions/filters/network/rocketmq_proxy/protocol.h" 
-#include "source/extensions/filters/network/rocketmq_proxy/router/router.h" -#include "source/extensions/filters/network/rocketmq_proxy/topic_route.h" #include "absl/types/optional.h" +#include "contrib/rocketmq_proxy/filters/network/source/codec.h" +#include "contrib/rocketmq_proxy/filters/network/source/protocol.h" +#include "contrib/rocketmq_proxy/filters/network/source/router/router.h" +#include "contrib/rocketmq_proxy/filters/network/source/topic_route.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/rocketmq_proxy/codec.cc b/contrib/rocketmq_proxy/filters/network/source/codec.cc similarity index 99% rename from source/extensions/filters/network/rocketmq_proxy/codec.cc rename to contrib/rocketmq_proxy/filters/network/source/codec.cc index 8255de59d3e1c..e5d35d1ffe432 100644 --- a/source/extensions/filters/network/rocketmq_proxy/codec.cc +++ b/contrib/rocketmq_proxy/filters/network/source/codec.cc @@ -1,4 +1,4 @@ -#include "source/extensions/filters/network/rocketmq_proxy/codec.h" +#include "contrib/rocketmq_proxy/filters/network/source/codec.h" #include @@ -6,7 +6,8 @@ #include "source/common/common/empty_string.h" #include "source/common/common/enum_to_int.h" #include "source/common/common/logger.h" -#include "source/extensions/filters/network/rocketmq_proxy/protocol.h" + +#include "contrib/rocketmq_proxy/filters/network/source/protocol.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/rocketmq_proxy/codec.h b/contrib/rocketmq_proxy/filters/network/source/codec.h similarity index 97% rename from source/extensions/filters/network/rocketmq_proxy/codec.h rename to contrib/rocketmq_proxy/filters/network/source/codec.h index 5459c380370e6..6ee9c8a9a97bb 100644 --- a/source/extensions/filters/network/rocketmq_proxy/codec.h +++ b/contrib/rocketmq_proxy/filters/network/source/codec.h @@ -10,7 +10,8 @@ #include "source/common/buffer/buffer_impl.h" #include 
"source/common/common/logger.h" #include "source/common/protobuf/utility.h" -#include "source/extensions/filters/network/rocketmq_proxy/protocol.h" + +#include "contrib/rocketmq_proxy/filters/network/source/protocol.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/rocketmq_proxy/config.cc b/contrib/rocketmq_proxy/filters/network/source/config.cc similarity index 89% rename from source/extensions/filters/network/rocketmq_proxy/config.cc rename to contrib/rocketmq_proxy/filters/network/source/config.cc index 938c79edaa716..dbb63f91a292a 100644 --- a/source/extensions/filters/network/rocketmq_proxy/config.cc +++ b/contrib/rocketmq_proxy/filters/network/source/config.cc @@ -1,15 +1,16 @@ -#include "source/extensions/filters/network/rocketmq_proxy/config.h" +#include "contrib/rocketmq_proxy/filters/network/source/config.h" #include -#include "envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.h" #include "envoy/registry/registry.h" #include "envoy/server/filter_config.h" -#include "source/extensions/filters/network/rocketmq_proxy/conn_manager.h" -#include "source/extensions/filters/network/rocketmq_proxy/stats.h" #include "source/extensions/filters/network/well_known_names.h" +#include "contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.h" +#include "contrib/rocketmq_proxy/filters/network/source/conn_manager.h" +#include "contrib/rocketmq_proxy/filters/network/source/stats.h" + namespace Envoy { namespace Extensions { namespace NetworkFilters { diff --git a/source/extensions/filters/network/rocketmq_proxy/config.h b/contrib/rocketmq_proxy/filters/network/source/config.h similarity index 84% rename from source/extensions/filters/network/rocketmq_proxy/config.h rename to contrib/rocketmq_proxy/filters/network/source/config.h index 5ff3d7e98966a..978eb9ed5bfd0 100644 --- a/source/extensions/filters/network/rocketmq_proxy/config.h +++ 
b/contrib/rocketmq_proxy/filters/network/source/config.h @@ -3,15 +3,15 @@ #include #include -#include "envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.h" -#include "envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.validate.h" - #include "source/extensions/filters/network/common/factory_base.h" -#include "source/extensions/filters/network/rocketmq_proxy/conn_manager.h" -#include "source/extensions/filters/network/rocketmq_proxy/router/route_matcher.h" -#include "source/extensions/filters/network/rocketmq_proxy/router/router_impl.h" #include "source/extensions/filters/network/well_known_names.h" +#include "contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.h" +#include "contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.validate.h" +#include "contrib/rocketmq_proxy/filters/network/source/conn_manager.h" +#include "contrib/rocketmq_proxy/filters/network/source/router/route_matcher.h" +#include "contrib/rocketmq_proxy/filters/network/source/router/router_impl.h" + namespace Envoy { namespace Extensions { namespace NetworkFilters { diff --git a/source/extensions/filters/network/rocketmq_proxy/conn_manager.cc b/contrib/rocketmq_proxy/filters/network/source/conn_manager.cc similarity index 99% rename from source/extensions/filters/network/rocketmq_proxy/conn_manager.cc rename to contrib/rocketmq_proxy/filters/network/source/conn_manager.cc index 8f5ba509f6db4..7dfb039736436 100644 --- a/source/extensions/filters/network/rocketmq_proxy/conn_manager.cc +++ b/contrib/rocketmq_proxy/filters/network/source/conn_manager.cc @@ -1,4 +1,4 @@ -#include "source/extensions/filters/network/rocketmq_proxy/conn_manager.h" +#include "contrib/rocketmq_proxy/filters/network/source/conn_manager.h" #include "envoy/buffer/buffer.h" #include "envoy/network/connection.h" diff --git a/source/extensions/filters/network/rocketmq_proxy/conn_manager.h 
b/contrib/rocketmq_proxy/filters/network/source/conn_manager.h similarity index 94% rename from source/extensions/filters/network/rocketmq_proxy/conn_manager.h rename to contrib/rocketmq_proxy/filters/network/source/conn_manager.h index e724b15b3393c..125332b1c3bcd 100644 --- a/source/extensions/filters/network/rocketmq_proxy/conn_manager.h +++ b/contrib/rocketmq_proxy/filters/network/source/conn_manager.h @@ -3,8 +3,6 @@ #include #include "envoy/common/time.h" -#include "envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.h" -#include "envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.validate.h" #include "envoy/network/connection.h" #include "envoy/network/filter.h" #include "envoy/stats/scope.h" @@ -15,12 +13,14 @@ #include "source/common/buffer/buffer_impl.h" #include "source/common/common/logger.h" -#include "source/extensions/filters/network/rocketmq_proxy/active_message.h" -#include "source/extensions/filters/network/rocketmq_proxy/codec.h" -#include "source/extensions/filters/network/rocketmq_proxy/stats.h" #include "absl/container/flat_hash_map.h" #include "absl/strings/string_view.h" +#include "contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.h" +#include "contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.validate.h" +#include "contrib/rocketmq_proxy/filters/network/source/active_message.h" +#include "contrib/rocketmq_proxy/filters/network/source/codec.h" +#include "contrib/rocketmq_proxy/filters/network/source/stats.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/rocketmq_proxy/metadata.h b/contrib/rocketmq_proxy/filters/network/source/metadata.h similarity index 100% rename from source/extensions/filters/network/rocketmq_proxy/metadata.h rename to contrib/rocketmq_proxy/filters/network/source/metadata.h diff --git a/source/extensions/filters/network/rocketmq_proxy/protocol.cc 
b/contrib/rocketmq_proxy/filters/network/source/protocol.cc similarity index 99% rename from source/extensions/filters/network/rocketmq_proxy/protocol.cc rename to contrib/rocketmq_proxy/filters/network/source/protocol.cc index f35a5f68d64fa..7b9ff954798e8 100644 --- a/source/extensions/filters/network/rocketmq_proxy/protocol.cc +++ b/contrib/rocketmq_proxy/filters/network/source/protocol.cc @@ -1,8 +1,9 @@ -#include "source/extensions/filters/network/rocketmq_proxy/protocol.h" +#include "contrib/rocketmq_proxy/filters/network/source/protocol.h" #include "source/common/common/assert.h" #include "source/common/common/enum_to_int.h" -#include "source/extensions/filters/network/rocketmq_proxy/well_known_names.h" + +#include "contrib/rocketmq_proxy/filters/network/source/well_known_names.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/rocketmq_proxy/protocol.h b/contrib/rocketmq_proxy/filters/network/source/protocol.h similarity index 99% rename from source/extensions/filters/network/rocketmq_proxy/protocol.h rename to contrib/rocketmq_proxy/filters/network/source/protocol.h index c6a0855627993..03082f3398c3f 100644 --- a/source/extensions/filters/network/rocketmq_proxy/protocol.h +++ b/contrib/rocketmq_proxy/filters/network/source/protocol.h @@ -9,9 +9,9 @@ #include "source/common/buffer/buffer_impl.h" #include "source/common/common/logger.h" #include "source/common/protobuf/protobuf.h" -#include "source/extensions/filters/network/rocketmq_proxy/metadata.h" #include "absl/strings/string_view.h" +#include "contrib/rocketmq_proxy/filters/network/source/metadata.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/rocketmq_proxy/router/BUILD b/contrib/rocketmq_proxy/filters/network/source/router/BUILD similarity index 80% rename from source/extensions/filters/network/rocketmq_proxy/router/BUILD rename to contrib/rocketmq_proxy/filters/network/source/router/BUILD index 
396e5bc401323..03eb3b741abf9 100644 --- a/source/extensions/filters/network/rocketmq_proxy/router/BUILD +++ b/contrib/rocketmq_proxy/filters/network/source/router/BUILD @@ -1,12 +1,12 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_library", - "envoy_extension_package", + "envoy_contrib_package", ) licenses(["notice"]) # Apache 2 -envoy_extension_package() +envoy_contrib_package() envoy_cc_library( name = "router_interface", @@ -24,10 +24,10 @@ envoy_cc_library( hdrs = ["router_impl.h"], deps = [ ":router_interface", + "//contrib/rocketmq_proxy/filters/network/source:conn_manager_lib", "//envoy/upstream:cluster_manager_interface", "//envoy/upstream:thread_local_cluster_interface", "//source/extensions/filters/network:well_known_names", - "//source/extensions/filters/network/rocketmq_proxy:conn_manager_lib", ], ) @@ -37,6 +37,7 @@ envoy_cc_library( hdrs = ["route_matcher.h"], deps = [ ":router_interface", + "//contrib/rocketmq_proxy/filters/network/source:metadata_lib", "//envoy/config:typed_config_interface", "//envoy/server:filter_config_interface", "//source/common/common:logger_lib", @@ -44,7 +45,6 @@ envoy_cc_library( "//source/common/http:header_utility_lib", "//source/common/router:metadatamatchcriteria_lib", "//source/extensions/filters/network:well_known_names", - "//source/extensions/filters/network/rocketmq_proxy:metadata_lib", - "@envoy_api//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg_cc_proto", + "@envoy_api//contrib/envoy/extensions/filters/network/rocketmq_proxy/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/network/rocketmq_proxy/router/route_matcher.cc b/contrib/rocketmq_proxy/filters/network/source/router/route_matcher.cc similarity index 94% rename from source/extensions/filters/network/rocketmq_proxy/router/route_matcher.cc rename to contrib/rocketmq_proxy/filters/network/source/router/route_matcher.cc index 4bb961f5fa391..d23ab2de9171e 100644 --- 
a/source/extensions/filters/network/rocketmq_proxy/router/route_matcher.cc +++ b/contrib/rocketmq_proxy/filters/network/source/router/route_matcher.cc @@ -1,9 +1,10 @@ -#include "source/extensions/filters/network/rocketmq_proxy/router/route_matcher.h" +#include "contrib/rocketmq_proxy/filters/network/source/router/route_matcher.h" #include "source/common/router/metadatamatchcriteria_impl.h" -#include "source/extensions/filters/network/rocketmq_proxy/metadata.h" #include "source/extensions/filters/network/well_known_names.h" +#include "contrib/rocketmq_proxy/filters/network/source/metadata.h" + namespace Envoy { namespace Extensions { namespace NetworkFilters { diff --git a/source/extensions/filters/network/rocketmq_proxy/router/route_matcher.h b/contrib/rocketmq_proxy/filters/network/source/router/route_matcher.h similarity index 88% rename from source/extensions/filters/network/rocketmq_proxy/router/route_matcher.h rename to contrib/rocketmq_proxy/filters/network/source/router/route_matcher.h index 06aec7221d250..3a4c58715de83 100644 --- a/source/extensions/filters/network/rocketmq_proxy/router/route_matcher.h +++ b/contrib/rocketmq_proxy/filters/network/source/router/route_matcher.h @@ -3,13 +3,14 @@ #include #include "envoy/config/typed_config.h" -#include "envoy/extensions/filters/network/rocketmq_proxy/v3/route.pb.h" #include "envoy/server/filter_config.h" #include "source/common/common/logger.h" #include "source/common/common/matchers.h" #include "source/common/http/header_utility.h" -#include "source/extensions/filters/network/rocketmq_proxy/router/router.h" + +#include "contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/route.pb.h" +#include "contrib/rocketmq_proxy/filters/network/source/router/router.h" namespace Envoy { namespace Extensions { @@ -42,7 +43,7 @@ class RouteEntryImpl : public RouteEntry, private: bool headersMatch(const Http::HeaderMap& headers) const; - const Matchers::StringMatcherImpl topic_name_; + const 
Matchers::StringMatcherImpl topic_name_; const std::string cluster_name_; const std::vector config_headers_; Envoy::Router::MetadataMatchCriteriaConstPtr metadata_match_criteria_; diff --git a/source/extensions/filters/network/rocketmq_proxy/router/router.h b/contrib/rocketmq_proxy/filters/network/source/router/router.h similarity index 100% rename from source/extensions/filters/network/rocketmq_proxy/router/router.h rename to contrib/rocketmq_proxy/filters/network/source/router/router.h diff --git a/source/extensions/filters/network/rocketmq_proxy/router/router_impl.cc b/contrib/rocketmq_proxy/filters/network/source/router/router_impl.cc similarity index 94% rename from source/extensions/filters/network/rocketmq_proxy/router/router_impl.cc rename to contrib/rocketmq_proxy/filters/network/source/router/router_impl.cc index 7c24db78482f7..138ff56bd747b 100644 --- a/source/extensions/filters/network/rocketmq_proxy/router/router_impl.cc +++ b/contrib/rocketmq_proxy/filters/network/source/router/router_impl.cc @@ -1,13 +1,14 @@ -#include "source/extensions/filters/network/rocketmq_proxy/router/router_impl.h" +#include "contrib/rocketmq_proxy/filters/network/source/router/router_impl.h" #include "source/common/common/enum_to_int.h" -#include "source/extensions/filters/network/rocketmq_proxy/active_message.h" -#include "source/extensions/filters/network/rocketmq_proxy/codec.h" -#include "source/extensions/filters/network/rocketmq_proxy/conn_manager.h" -#include "source/extensions/filters/network/rocketmq_proxy/protocol.h" -#include "source/extensions/filters/network/rocketmq_proxy/well_known_names.h" #include "source/extensions/filters/network/well_known_names.h" +#include "contrib/rocketmq_proxy/filters/network/source/active_message.h" +#include "contrib/rocketmq_proxy/filters/network/source/codec.h" +#include "contrib/rocketmq_proxy/filters/network/source/conn_manager.h" +#include "contrib/rocketmq_proxy/filters/network/source/protocol.h" +#include 
"contrib/rocketmq_proxy/filters/network/source/well_known_names.h" + namespace Envoy { namespace Extensions { namespace NetworkFilters { diff --git a/source/extensions/filters/network/rocketmq_proxy/router/router_impl.h b/contrib/rocketmq_proxy/filters/network/source/router/router_impl.h similarity index 97% rename from source/extensions/filters/network/rocketmq_proxy/router/router_impl.h rename to contrib/rocketmq_proxy/filters/network/source/router/router_impl.h index 38a61dcdfef2e..1cb222f5d7f9f 100644 --- a/source/extensions/filters/network/rocketmq_proxy/router/router_impl.h +++ b/contrib/rocketmq_proxy/filters/network/source/router/router_impl.h @@ -6,7 +6,8 @@ #include "source/common/common/logger.h" #include "source/common/upstream/load_balancer_impl.h" -#include "source/extensions/filters/network/rocketmq_proxy/router/router.h" + +#include "contrib/rocketmq_proxy/filters/network/source/router/router.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/network/rocketmq_proxy/stats.h b/contrib/rocketmq_proxy/filters/network/source/stats.h similarity index 100% rename from source/extensions/filters/network/rocketmq_proxy/stats.h rename to contrib/rocketmq_proxy/filters/network/source/stats.h diff --git a/source/extensions/filters/network/rocketmq_proxy/topic_route.cc b/contrib/rocketmq_proxy/filters/network/source/topic_route.cc similarity index 96% rename from source/extensions/filters/network/rocketmq_proxy/topic_route.cc rename to contrib/rocketmq_proxy/filters/network/source/topic_route.cc index afe0a828d0adb..7336ec97d17cc 100644 --- a/source/extensions/filters/network/rocketmq_proxy/topic_route.cc +++ b/contrib/rocketmq_proxy/filters/network/source/topic_route.cc @@ -1,4 +1,4 @@ -#include "source/extensions/filters/network/rocketmq_proxy/topic_route.h" +#include "contrib/rocketmq_proxy/filters/network/source/topic_route.h" namespace Envoy { namespace Extensions { diff --git 
a/source/extensions/filters/network/rocketmq_proxy/topic_route.h b/contrib/rocketmq_proxy/filters/network/source/topic_route.h similarity index 100% rename from source/extensions/filters/network/rocketmq_proxy/topic_route.h rename to contrib/rocketmq_proxy/filters/network/source/topic_route.h diff --git a/source/extensions/filters/network/rocketmq_proxy/well_known_names.h b/contrib/rocketmq_proxy/filters/network/source/well_known_names.h similarity index 100% rename from source/extensions/filters/network/rocketmq_proxy/well_known_names.h rename to contrib/rocketmq_proxy/filters/network/source/well_known_names.h diff --git a/test/extensions/filters/network/rocketmq_proxy/BUILD b/contrib/rocketmq_proxy/filters/network/test/BUILD similarity index 59% rename from test/extensions/filters/network/rocketmq_proxy/BUILD rename to contrib/rocketmq_proxy/filters/network/test/BUILD index 975eaecd30608..00ac24134fd46 100644 --- a/test/extensions/filters/network/rocketmq_proxy/BUILD +++ b/contrib/rocketmq_proxy/filters/network/test/BUILD @@ -1,25 +1,22 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_mock", + "envoy_cc_test", "envoy_cc_test_library", - "envoy_package", -) -load( - "//test/extensions:extensions_build_system.bzl", - "envoy_extension_cc_test", + "envoy_contrib_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_contrib_package() envoy_cc_mock( name = "mocks_lib", srcs = ["mocks.cc"], hdrs = ["mocks.h"], deps = [ - "//source/extensions/filters/network/rocketmq_proxy:config", - "//source/extensions/filters/network/rocketmq_proxy/router:router_lib", + "//contrib/rocketmq_proxy/filters/network/source:config", + "//contrib/rocketmq_proxy/filters/network/source/router:router_lib", "//test/mocks/upstream:cluster_manager_mocks", ], ) @@ -29,48 +26,44 @@ envoy_cc_test_library( srcs = ["utility.cc"], hdrs = ["utility.h"], deps = [ - "//source/extensions/filters/network/rocketmq_proxy:config", + "//contrib/rocketmq_proxy/filters/network/source:config", ], 
) -envoy_extension_cc_test( +envoy_cc_test( name = "protocol_test", srcs = ["protocol_test.cc"], - extension_names = ["envoy.filters.network.rocketmq_proxy"], deps = [ - "//source/extensions/filters/network/rocketmq_proxy:config", + "//contrib/rocketmq_proxy/filters/network/source:config", "//test/test_common:utility_lib", ], ) -envoy_extension_cc_test( +envoy_cc_test( name = "router_test", srcs = ["router_test.cc"], - extension_names = ["envoy.filters.network.rocketmq_proxy"], deps = [ ":mocks_lib", ":utility_lib", - "//source/extensions/filters/network/rocketmq_proxy:config", + "//contrib/rocketmq_proxy/filters/network/source:config", "//test/mocks/server:factory_context_mocks", "//test/test_common:utility_lib", ], ) -envoy_extension_cc_test( +envoy_cc_test( name = "topic_route_test", srcs = ["topic_route_test.cc"], - extension_names = ["envoy.filters.network.rocketmq_proxy"], deps = [ + "//contrib/rocketmq_proxy/filters/network/source:config", "//source/common/protobuf:utility_lib", - "//source/extensions/filters/network/rocketmq_proxy:config", "//test/test_common:utility_lib", ], ) -envoy_extension_cc_test( +envoy_cc_test( name = "conn_manager_test", srcs = ["conn_manager_test.cc"], - extension_names = ["envoy.filters.network.rocketmq_proxy"], deps = [ ":utility_lib", "//test/common/stats:stat_test_utility_lib", @@ -83,14 +76,13 @@ envoy_extension_cc_test( ], ) -envoy_extension_cc_test( +envoy_cc_test( name = "active_message_test", srcs = ["active_message_test.cc"], - extension_names = ["envoy.filters.network.rocketmq_proxy"], deps = [ ":utility_lib", + "//contrib/rocketmq_proxy/filters/network/source:config", "//source/common/network:address_lib", - "//source/extensions/filters/network/rocketmq_proxy:config", "//test/mocks/network:network_mocks", "//test/mocks/server:factory_context_mocks", "//test/mocks/stream_info:stream_info_mocks", @@ -98,24 +90,22 @@ envoy_extension_cc_test( ], ) -envoy_extension_cc_test( +envoy_cc_test( name = "config_test", srcs = 
["config_test.cc"], - extension_names = ["envoy.filters.network.rocketmq_proxy"], deps = [ - "//source/extensions/filters/network/rocketmq_proxy:config", + "//contrib/rocketmq_proxy/filters/network/source:config", "//test/mocks/local_info:local_info_mocks", "//test/mocks/server:factory_context_mocks", "//test/mocks/server:instance_mocks", "//test/test_common:registry_lib", - "@envoy_api//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg_cc_proto", + "@envoy_api//contrib/envoy/extensions/filters/network/rocketmq_proxy/v3:pkg_cc_proto", ], ) -envoy_extension_cc_test( +envoy_cc_test( name = "codec_test", srcs = ["codec_test.cc"], - extension_names = ["envoy.filters.network.rocketmq_proxy"], deps = [ ":utility_lib", "//source/common/network:address_lib", @@ -125,13 +115,12 @@ envoy_extension_cc_test( ], ) -envoy_extension_cc_test( +envoy_cc_test( name = "route_matcher_test", srcs = ["route_matcher_test.cc"], - extension_names = ["envoy.filters.network.rocketmq_proxy"], deps = [ - "//source/extensions/filters/network/rocketmq_proxy/router:route_matcher", + "//contrib/rocketmq_proxy/filters/network/source/router:route_matcher", "//test/test_common:utility_lib", - "@envoy_api//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg_cc_proto", + "@envoy_api//contrib/envoy/extensions/filters/network/rocketmq_proxy/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/network/rocketmq_proxy/active_message_test.cc b/contrib/rocketmq_proxy/filters/network/test/active_message_test.cc similarity index 93% rename from test/extensions/filters/network/rocketmq_proxy/active_message_test.cc rename to contrib/rocketmq_proxy/filters/network/test/active_message_test.cc index 3720074f1b576..8b98a7be887b6 100644 --- a/test/extensions/filters/network/rocketmq_proxy/active_message_test.cc +++ b/contrib/rocketmq_proxy/filters/network/test/active_message_test.cc @@ -1,14 +1,14 @@ #include "source/common/network/address_impl.h" -#include 
"source/extensions/filters/network/rocketmq_proxy/active_message.h" -#include "source/extensions/filters/network/rocketmq_proxy/config.h" -#include "source/extensions/filters/network/rocketmq_proxy/conn_manager.h" -#include "source/extensions/filters/network/rocketmq_proxy/protocol.h" -#include "source/extensions/filters/network/rocketmq_proxy/well_known_names.h" -#include "test/extensions/filters/network/rocketmq_proxy/utility.h" #include "test/mocks/network/mocks.h" #include "test/mocks/server/factory_context.h" +#include "contrib/rocketmq_proxy/filters/network/source/active_message.h" +#include "contrib/rocketmq_proxy/filters/network/source/config.h" +#include "contrib/rocketmq_proxy/filters/network/source/conn_manager.h" +#include "contrib/rocketmq_proxy/filters/network/source/protocol.h" +#include "contrib/rocketmq_proxy/filters/network/source/well_known_names.h" +#include "contrib/rocketmq_proxy/filters/network/test/utility.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/rocketmq_proxy/codec_test.cc b/contrib/rocketmq_proxy/filters/network/test/codec_test.cc similarity index 99% rename from test/extensions/filters/network/rocketmq_proxy/codec_test.cc rename to contrib/rocketmq_proxy/filters/network/test/codec_test.cc index 9c8fe92beb238..9a8d471078a7c 100644 --- a/test/extensions/filters/network/rocketmq_proxy/codec_test.cc +++ b/contrib/rocketmq_proxy/filters/network/test/codec_test.cc @@ -2,10 +2,9 @@ #include "source/common/common/enum_to_int.h" #include "source/common/network/address_impl.h" #include "source/common/protobuf/utility.h" -#include "source/extensions/filters/network/rocketmq_proxy/codec.h" - -#include "test/extensions/filters/network/rocketmq_proxy/utility.h" +#include "contrib/rocketmq_proxy/filters/network/source/codec.h" +#include "contrib/rocketmq_proxy/filters/network/test/utility.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git 
a/test/extensions/filters/network/rocketmq_proxy/config_test.cc b/contrib/rocketmq_proxy/filters/network/test/config_test.cc similarity index 94% rename from test/extensions/filters/network/rocketmq_proxy/config_test.cc rename to contrib/rocketmq_proxy/filters/network/test/config_test.cc index 592ae5de01f83..62e5eb0516328 100644 --- a/test/extensions/filters/network/rocketmq_proxy/config_test.cc +++ b/contrib/rocketmq_proxy/filters/network/test/config_test.cc @@ -1,13 +1,11 @@ -#include "envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.h" -#include "envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.validate.h" - -#include "source/extensions/filters/network/rocketmq_proxy/config.h" - #include "test/mocks/local_info/mocks.h" #include "test/mocks/server/factory_context.h" #include "test/mocks/server/instance.h" #include "test/test_common/registry.h" +#include "contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.h" +#include "contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.validate.h" +#include "contrib/rocketmq_proxy/filters/network/source/config.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -22,10 +20,9 @@ namespace RocketmqProxy { using RocketmqProxyProto = envoy::extensions::filters::network::rocketmq_proxy::v3::RocketmqProxy; -RocketmqProxyProto parseRocketmqProxyFromV3Yaml(const std::string& yaml, - bool avoid_boosting = true) { +RocketmqProxyProto parseRocketmqProxyFromV3Yaml(const std::string& yaml) { RocketmqProxyProto rocketmq_proxy; - TestUtility::loadFromYaml(yaml, rocketmq_proxy, false, avoid_boosting); + TestUtility::loadFromYaml(yaml, rocketmq_proxy); return rocketmq_proxy; } diff --git a/test/extensions/filters/network/rocketmq_proxy/conn_manager_test.cc b/contrib/rocketmq_proxy/filters/network/test/conn_manager_test.cc similarity index 98% rename from test/extensions/filters/network/rocketmq_proxy/conn_manager_test.cc rename to 
contrib/rocketmq_proxy/filters/network/test/conn_manager_test.cc index 44e3c1a9d64e3..30b783c4a0d98 100644 --- a/test/extensions/filters/network/rocketmq_proxy/conn_manager_test.cc +++ b/contrib/rocketmq_proxy/filters/network/test/conn_manager_test.cc @@ -1,17 +1,16 @@ #include "envoy/network/connection.h" -#include "source/extensions/filters/network/rocketmq_proxy/config.h" -#include "source/extensions/filters/network/rocketmq_proxy/conn_manager.h" -#include "source/extensions/filters/network/rocketmq_proxy/well_known_names.h" - #include "test/common/stats/stat_test_utility.h" #include "test/common/upstream/utility.h" -#include "test/extensions/filters/network/rocketmq_proxy/utility.h" #include "test/mocks/network/connection.h" #include "test/mocks/network/mocks.h" #include "test/mocks/server/factory_context.h" #include "test/mocks/server/instance.h" +#include "contrib/rocketmq_proxy/filters/network/source/config.h" +#include "contrib/rocketmq_proxy/filters/network/source/conn_manager.h" +#include "contrib/rocketmq_proxy/filters/network/source/well_known_names.h" +#include "contrib/rocketmq_proxy/filters/network/test/utility.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/rocketmq_proxy/mocks.cc b/contrib/rocketmq_proxy/filters/network/test/mocks.cc similarity index 92% rename from test/extensions/filters/network/rocketmq_proxy/mocks.cc rename to contrib/rocketmq_proxy/filters/network/test/mocks.cc index aea79d23d4e11..17f8ee9958eb4 100644 --- a/test/extensions/filters/network/rocketmq_proxy/mocks.cc +++ b/contrib/rocketmq_proxy/filters/network/test/mocks.cc @@ -1,7 +1,6 @@ -#include "test/extensions/filters/network/rocketmq_proxy/mocks.h" - -#include "source/extensions/filters/network/rocketmq_proxy/router/router_impl.h" +#include "contrib/rocketmq_proxy/filters/network/test/mocks.h" +#include "contrib/rocketmq_proxy/filters/network/source/router/router_impl.h" #include "gtest/gtest.h" using testing::_; diff --git 
a/test/extensions/filters/network/rocketmq_proxy/mocks.h b/contrib/rocketmq_proxy/filters/network/test/mocks.h similarity index 93% rename from test/extensions/filters/network/rocketmq_proxy/mocks.h rename to contrib/rocketmq_proxy/filters/network/test/mocks.h index 23302611dc287..8eff761dfba2c 100644 --- a/test/extensions/filters/network/rocketmq_proxy/mocks.h +++ b/contrib/rocketmq_proxy/filters/network/test/mocks.h @@ -1,10 +1,9 @@ #pragma once -#include "source/extensions/filters/network/rocketmq_proxy/active_message.h" -#include "source/extensions/filters/network/rocketmq_proxy/conn_manager.h" - #include "test/mocks/upstream/cluster_manager.h" +#include "contrib/rocketmq_proxy/filters/network/source/active_message.h" +#include "contrib/rocketmq_proxy/filters/network/source/conn_manager.h" #include "gmock/gmock.h" namespace Envoy { diff --git a/test/extensions/filters/network/rocketmq_proxy/protocol_test.cc b/contrib/rocketmq_proxy/filters/network/test/protocol_test.cc similarity index 99% rename from test/extensions/filters/network/rocketmq_proxy/protocol_test.cc rename to contrib/rocketmq_proxy/filters/network/test/protocol_test.cc index 057b6dcc10d76..495eb74671463 100644 --- a/test/extensions/filters/network/rocketmq_proxy/protocol_test.cc +++ b/contrib/rocketmq_proxy/filters/network/test/protocol_test.cc @@ -1,6 +1,6 @@ #include "source/common/protobuf/utility.h" -#include "source/extensions/filters/network/rocketmq_proxy/protocol.h" +#include "contrib/rocketmq_proxy/filters/network/source/protocol.h" #include "gtest/gtest.h" namespace Envoy { diff --git a/test/extensions/filters/network/rocketmq_proxy/route_matcher_test.cc b/contrib/rocketmq_proxy/filters/network/test/route_matcher_test.cc similarity index 77% rename from test/extensions/filters/network/rocketmq_proxy/route_matcher_test.cc rename to contrib/rocketmq_proxy/filters/network/test/route_matcher_test.cc index 5153bc13729d9..daef5b3daff0b 100644 --- 
a/test/extensions/filters/network/rocketmq_proxy/route_matcher_test.cc +++ b/contrib/rocketmq_proxy/filters/network/test/route_matcher_test.cc @@ -1,13 +1,11 @@ -#include "envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.h" -#include "envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.validate.h" -#include "envoy/extensions/filters/network/rocketmq_proxy/v3/route.pb.h" -#include "envoy/extensions/filters/network/rocketmq_proxy/v3/route.pb.validate.h" - -#include "source/extensions/filters/network/rocketmq_proxy/metadata.h" -#include "source/extensions/filters/network/rocketmq_proxy/router/route_matcher.h" - #include "test/test_common/utility.h" +#include "contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.h" +#include "contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.pb.validate.h" +#include "contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/route.pb.h" +#include "contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/route.pb.validate.h" +#include "contrib/rocketmq_proxy/filters/network/source/metadata.h" +#include "contrib/rocketmq_proxy/filters/network/source/router/route_matcher.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/rocketmq_proxy/router_test.cc b/contrib/rocketmq_proxy/filters/network/test/router_test.cc similarity index 97% rename from test/extensions/filters/network/rocketmq_proxy/router_test.cc rename to contrib/rocketmq_proxy/filters/network/test/router_test.cc index 49521a9c4763f..3cee446599e4e 100644 --- a/test/extensions/filters/network/rocketmq_proxy/router_test.cc +++ b/contrib/rocketmq_proxy/filters/network/test/router_test.cc @@ -1,12 +1,11 @@ -#include "source/extensions/filters/network/rocketmq_proxy/config.h" -#include "source/extensions/filters/network/rocketmq_proxy/conn_manager.h" -#include "source/extensions/filters/network/rocketmq_proxy/router/router.h" -#include 
"source/extensions/filters/network/rocketmq_proxy/well_known_names.h" - -#include "test/extensions/filters/network/rocketmq_proxy/mocks.h" -#include "test/extensions/filters/network/rocketmq_proxy/utility.h" #include "test/mocks/server/factory_context.h" +#include "contrib/rocketmq_proxy/filters/network/source/config.h" +#include "contrib/rocketmq_proxy/filters/network/source/conn_manager.h" +#include "contrib/rocketmq_proxy/filters/network/source/router/router.h" +#include "contrib/rocketmq_proxy/filters/network/source/well_known_names.h" +#include "contrib/rocketmq_proxy/filters/network/test/mocks.h" +#include "contrib/rocketmq_proxy/filters/network/test/utility.h" #include "gtest/gtest.h" using testing::_; diff --git a/test/extensions/filters/network/rocketmq_proxy/topic_route_test.cc b/contrib/rocketmq_proxy/filters/network/test/topic_route_test.cc similarity index 96% rename from test/extensions/filters/network/rocketmq_proxy/topic_route_test.cc rename to contrib/rocketmq_proxy/filters/network/test/topic_route_test.cc index e1c64375c7407..59b1d8bd9c3d7 100644 --- a/test/extensions/filters/network/rocketmq_proxy/topic_route_test.cc +++ b/contrib/rocketmq_proxy/filters/network/test/topic_route_test.cc @@ -1,7 +1,7 @@ #include "source/common/protobuf/utility.h" -#include "source/extensions/filters/network/rocketmq_proxy/topic_route.h" #include "absl/container/node_hash_map.h" +#include "contrib/rocketmq_proxy/filters/network/source/topic_route.h" #include "gtest/gtest.h" namespace Envoy { diff --git a/test/extensions/filters/network/rocketmq_proxy/utility.cc b/contrib/rocketmq_proxy/filters/network/test/utility.cc similarity index 99% rename from test/extensions/filters/network/rocketmq_proxy/utility.cc rename to contrib/rocketmq_proxy/filters/network/test/utility.cc index 16d89d148c0a4..c4459bd36ae6a 100644 --- a/test/extensions/filters/network/rocketmq_proxy/utility.cc +++ b/contrib/rocketmq_proxy/filters/network/test/utility.cc @@ -1,4 +1,4 @@ -#include 
"test/extensions/filters/network/rocketmq_proxy/utility.h" +#include "contrib/rocketmq_proxy/filters/network/test/utility.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/rocketmq_proxy/utility.h b/contrib/rocketmq_proxy/filters/network/test/utility.h similarity index 84% rename from test/extensions/filters/network/rocketmq_proxy/utility.h rename to contrib/rocketmq_proxy/filters/network/test/utility.h index ffbc286bc18b1..9055e1809b75b 100644 --- a/test/extensions/filters/network/rocketmq_proxy/utility.h +++ b/contrib/rocketmq_proxy/filters/network/test/utility.h @@ -1,8 +1,7 @@ #pragma once -#include "source/extensions/filters/network/rocketmq_proxy/config.h" -#include "source/extensions/filters/network/rocketmq_proxy/conn_manager.h" - +#include "contrib/rocketmq_proxy/filters/network/source/config.h" +#include "contrib/rocketmq_proxy/filters/network/source/conn_manager.h" #include "gtest/gtest.h" namespace Envoy { diff --git a/source/extensions/filters/http/squash/BUILD b/contrib/squash/filters/http/source/BUILD similarity index 78% rename from source/extensions/filters/http/squash/BUILD rename to contrib/squash/filters/http/source/BUILD index 65831764d4c7e..897149d5137fc 100644 --- a/source/extensions/filters/http/squash/BUILD +++ b/contrib/squash/filters/http/source/BUILD @@ -1,8 +1,8 @@ load( "//bazel:envoy_build_system.bzl", - "envoy_cc_extension", + "envoy_cc_contrib_extension", "envoy_cc_library", - "envoy_extension_package", + "envoy_contrib_package", ) licenses(["notice"]) # Apache 2 @@ -10,7 +10,7 @@ licenses(["notice"]) # Apache 2 # L7 HTTP filter that implements the Squash microservice debugger # Public docs: docs/root/configuration/http_filters/squash_filter.rst -envoy_extension_package() +envoy_contrib_package() envoy_cc_library( name = "squash_filter_lib", @@ -29,19 +29,19 @@ envoy_cc_library( "//source/common/http:utility_lib", "//source/common/json:json_loader_lib", "//source/common/protobuf:utility_lib", - 
"@envoy_api//envoy/extensions/filters/http/squash/v3:pkg_cc_proto", + "@envoy_api//contrib/envoy/extensions/filters/http/squash/v3:pkg_cc_proto", ], ) -envoy_cc_extension( +envoy_cc_contrib_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], deps = [ + ":squash_filter_lib", "//envoy/registry", "//source/common/protobuf:utility_lib", "//source/extensions/filters/http/common:factory_base_lib", - "//source/extensions/filters/http/squash:squash_filter_lib", - "@envoy_api//envoy/extensions/filters/http/squash/v3:pkg_cc_proto", + "@envoy_api//contrib/envoy/extensions/filters/http/squash/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/http/squash/config.cc b/contrib/squash/filters/http/source/config.cc similarity index 80% rename from source/extensions/filters/http/squash/config.cc rename to contrib/squash/filters/http/source/config.cc index d2420a6bb8381..cec853979f2e6 100644 --- a/source/extensions/filters/http/squash/config.cc +++ b/contrib/squash/filters/http/source/config.cc @@ -1,12 +1,13 @@ -#include "source/extensions/filters/http/squash/config.h" +#include "contrib/squash/filters/http/source/config.h" -#include "envoy/extensions/filters/http/squash/v3/squash.pb.h" -#include "envoy/extensions/filters/http/squash/v3/squash.pb.validate.h" #include "envoy/registry/registry.h" #include "source/common/protobuf/protobuf.h" #include "source/common/protobuf/utility.h" -#include "source/extensions/filters/http/squash/squash_filter.h" + +#include "contrib/envoy/extensions/filters/http/squash/v3/squash.pb.h" +#include "contrib/envoy/extensions/filters/http/squash/v3/squash.pb.validate.h" +#include "contrib/squash/filters/http/source/squash_filter.h" namespace Envoy { namespace Extensions { diff --git a/source/extensions/filters/http/squash/config.h b/contrib/squash/filters/http/source/config.h similarity index 84% rename from source/extensions/filters/http/squash/config.h rename to contrib/squash/filters/http/source/config.h index 
c1514fe61ab97..feff75eaf8b0e 100644 --- a/source/extensions/filters/http/squash/config.h +++ b/contrib/squash/filters/http/source/config.h @@ -1,10 +1,10 @@ #pragma once -#include "envoy/extensions/filters/http/squash/v3/squash.pb.h" -#include "envoy/extensions/filters/http/squash/v3/squash.pb.validate.h" - #include "source/extensions/filters/http/common/factory_base.h" +#include "contrib/envoy/extensions/filters/http/squash/v3/squash.pb.h" +#include "contrib/envoy/extensions/filters/http/squash/v3/squash.pb.validate.h" + namespace Envoy { namespace Extensions { namespace HttpFilters { diff --git a/source/extensions/filters/http/squash/squash_filter.cc b/contrib/squash/filters/http/source/squash_filter.cc similarity index 97% rename from source/extensions/filters/http/squash/squash_filter.cc rename to contrib/squash/filters/http/source/squash_filter.cc index d15077455483e..41fc37e5f7613 100644 --- a/source/extensions/filters/http/squash/squash_filter.cc +++ b/contrib/squash/filters/http/source/squash_filter.cc @@ -1,8 +1,7 @@ -#include "source/extensions/filters/http/squash/squash_filter.h" +#include "contrib/squash/filters/http/source/squash_filter.h" #include -#include "envoy/extensions/filters/http/squash/v3/squash.pb.h" #include "envoy/http/codes.h" #include "source/common/common/empty_string.h" @@ -16,6 +15,7 @@ #include "source/common/protobuf/utility.h" #include "absl/container/fixed_array.h" +#include "contrib/envoy/extensions/filters/http/squash/v3/squash.pb.h" namespace Envoy { namespace Extensions { @@ -91,7 +91,8 @@ void SquashFilterConfig::updateTemplateInValue(ProtobufWkt::Value& curvalue) { To interpolate an environment variable named ENV, add '{{ ENV }}' (without the quotes, with the spaces) to the template string. - See api/envoy/config/filter/http/squash/v2/squash.proto for the motivation on why this is needed. + See api/envoy/extensions/squash/filters/http/v3/squash.proto for the motivation on why this is + needed. 
*/ std::string SquashFilterConfig::replaceEnv(const std::string& attachment_template) { std::string s; diff --git a/source/extensions/filters/http/squash/squash_filter.h b/contrib/squash/filters/http/source/squash_filter.h similarity index 98% rename from source/extensions/filters/http/squash/squash_filter.h rename to contrib/squash/filters/http/source/squash_filter.h index 6786a6117a741..df1688210ac4b 100644 --- a/source/extensions/filters/http/squash/squash_filter.h +++ b/contrib/squash/filters/http/source/squash_filter.h @@ -2,7 +2,6 @@ #include -#include "envoy/extensions/filters/http/squash/v3/squash.pb.h" #include "envoy/http/async_client.h" #include "envoy/http/filter.h" #include "envoy/json/json_object.h" @@ -12,6 +11,7 @@ #include "source/common/protobuf/protobuf.h" #include "absl/types/optional.h" +#include "contrib/envoy/extensions/filters/http/squash/v3/squash.pb.h" namespace Envoy { namespace Extensions { diff --git a/test/extensions/filters/http/squash/BUILD b/contrib/squash/filters/http/test/BUILD similarity index 56% rename from test/extensions/filters/http/squash/BUILD rename to contrib/squash/filters/http/test/BUILD index f03de9e17e6d3..a2a9b49f4c9ad 100644 --- a/test/extensions/filters/http/squash/BUILD +++ b/contrib/squash/filters/http/test/BUILD @@ -1,52 +1,46 @@ load( "//bazel:envoy_build_system.bzl", - "envoy_package", -) -load( - "//test/extensions:extensions_build_system.bzl", - "envoy_extension_cc_test", + "envoy_cc_test", + "envoy_contrib_package", ) licenses(["notice"]) # Apache 2 -envoy_package() +envoy_contrib_package() -envoy_extension_cc_test( +envoy_cc_test( name = "squash_filter_test", srcs = ["squash_filter_test.cc"], - extension_names = ["envoy.filters.http.squash"], deps = [ + "//contrib/squash/filters/http/source:squash_filter_lib", "//envoy/event:dispatcher_interface", "//source/common/http:header_map_lib", "//source/common/stats:stats_lib", - "//source/extensions/filters/http/squash:squash_filter_lib", 
"//test/mocks/http:http_mocks", "//test/mocks/server:factory_context_mocks", "//test/mocks/upstream:cluster_manager_mocks", "//test/test_common:environment_lib", - "@envoy_api//envoy/extensions/filters/http/squash/v3:pkg_cc_proto", + "@envoy_api//contrib/envoy/extensions/filters/http/squash/v3:pkg_cc_proto", ], ) -envoy_extension_cc_test( +envoy_cc_test( name = "squash_filter_integration_test", srcs = ["squash_filter_integration_test.cc"], - extension_names = ["envoy.filters.http.squash"], deps = [ - "//source/extensions/filters/http/squash:config", + "//contrib/squash/filters/http/source:config", "//test/integration:http_integration_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", ], ) -envoy_extension_cc_test( +envoy_cc_test( name = "config_test", srcs = ["config_test.cc"], - extension_names = ["envoy.filters.http.squash"], deps = [ - "//source/extensions/filters/http/squash:config", + "//contrib/squash/filters/http/source:config", "//test/mocks/server:factory_context_mocks", "//test/test_common:utility_lib", - "@envoy_api//envoy/extensions/filters/http/squash/v3:pkg_cc_proto", + "@envoy_api//contrib/envoy/extensions/filters/http/squash/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/http/squash/config_test.cc b/contrib/squash/filters/http/test/config_test.cc similarity index 88% rename from test/extensions/filters/http/squash/config_test.cc rename to contrib/squash/filters/http/test/config_test.cc index 8964bb665308c..613dc5b67e9ad 100644 --- a/test/extensions/filters/http/squash/config_test.cc +++ b/contrib/squash/filters/http/test/config_test.cc @@ -1,11 +1,9 @@ -#include "envoy/extensions/filters/http/squash/v3/squash.pb.h" -#include "envoy/extensions/filters/http/squash/v3/squash.pb.validate.h" - -#include "source/extensions/filters/http/squash/config.h" - #include "test/mocks/server/factory_context.h" #include "test/test_common/utility.h" +#include "contrib/envoy/extensions/filters/http/squash/v3/squash.pb.h" +#include 
"contrib/envoy/extensions/filters/http/squash/v3/squash.pb.validate.h" +#include "contrib/squash/filters/http/source/config.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/http/squash/squash_filter_integration_test.cc b/contrib/squash/filters/http/test/squash_filter_integration_test.cc similarity index 100% rename from test/extensions/filters/http/squash/squash_filter_integration_test.cc rename to contrib/squash/filters/http/test/squash_filter_integration_test.cc diff --git a/test/extensions/filters/http/squash/squash_filter_test.cc b/contrib/squash/filters/http/test/squash_filter_test.cc similarity index 99% rename from test/extensions/filters/http/squash/squash_filter_test.cc rename to contrib/squash/filters/http/test/squash_filter_test.cc index be5d8d852bfe3..756e83e402fb5 100644 --- a/test/extensions/filters/http/squash/squash_filter_test.cc +++ b/contrib/squash/filters/http/test/squash_filter_test.cc @@ -2,18 +2,16 @@ #include #include -#include "envoy/common/scope_tracker.h" -#include "envoy/extensions/filters/http/squash/v3/squash.pb.h" - #include "source/common/http/message_impl.h" #include "source/common/protobuf/protobuf.h" -#include "source/extensions/filters/http/squash/squash_filter.h" #include "test/mocks/server/factory_context.h" #include "test/mocks/upstream/cluster_manager.h" #include "test/test_common/environment.h" #include "test/test_common/utility.h" +#include "contrib/envoy/extensions/filters/http/squash/v3/squash.pb.h" +#include "contrib/squash/filters/http/source/squash_filter.h" #include "fmt/format.h" #include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/contrib/sxg/filters/http/source/BUILD b/contrib/sxg/filters/http/source/BUILD new file mode 100644 index 0000000000000..34d61bc21a946 --- /dev/null +++ b/contrib/sxg/filters/http/source/BUILD @@ -0,0 +1,47 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_contrib_extension", + "envoy_cc_library", + "envoy_contrib_package", +) + 
+licenses(["notice"]) # Apache 2 + +envoy_contrib_package() + +envoy_cc_library( + name = "sxg_lib", + srcs = [ + "encoder.cc", + "filter.cc", + "filter_config.cc", + ], + hdrs = [ + "encoder.h", + "filter.h", + "filter_config.h", + ], + external_deps = ["libsxg"], + deps = [ + "//envoy/server:filter_config_interface", + "//source/common/config:datasource_lib", + "//source/common/http:codes_lib", + "//source/common/stats:symbol_table_lib", + "//source/common/stats:utility_lib", + "//source/extensions/filters/http/common:pass_through_filter_lib", + "@boringssl//:ssl", + "@envoy_api//contrib/envoy/extensions/filters/http/sxg/v3alpha:pkg_cc_proto", + ], +) + +envoy_cc_contrib_extension( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + deps = [ + ":sxg_lib", + "//envoy/registry", + "//source/extensions/filters/http/common:factory_base_lib", + "@envoy_api//contrib/envoy/extensions/filters/http/sxg/v3alpha:pkg_cc_proto", + ], +) diff --git a/contrib/sxg/filters/http/source/config.cc b/contrib/sxg/filters/http/source/config.cc new file mode 100644 index 0000000000000..d487e395803fc --- /dev/null +++ b/contrib/sxg/filters/http/source/config.cc @@ -0,0 +1,71 @@ +#include "contrib/sxg/filters/http/source/config.h" + +#include +#include + +#include "envoy/registry/registry.h" +#include "envoy/secret/secret_manager.h" +#include "envoy/secret/secret_provider.h" + +#include "source/common/protobuf/utility.h" + +#include "contrib/envoy/extensions/filters/http/sxg/v3alpha/sxg.pb.h" +#include "contrib/envoy/extensions/filters/http/sxg/v3alpha/sxg.pb.validate.h" +#include "contrib/sxg/filters/http/source/encoder.h" +#include "contrib/sxg/filters/http/source/filter.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace SXG { + +namespace { +Secret::GenericSecretConfigProviderSharedPtr +secretsProvider(const envoy::extensions::transport_sockets::tls::v3::SdsSecretConfig& config, + Secret::SecretManager& secret_manager, + 
Server::Configuration::TransportSocketFactoryContext& transport_socket_factory) { + if (config.has_sds_config()) { + return secret_manager.findOrCreateGenericSecretProvider(config.sds_config(), config.name(), + transport_socket_factory); + } else { + return secret_manager.findStaticGenericSecretProvider(config.name()); + } +} +} // namespace + +Http::FilterFactoryCb FilterFactory::createFilterFactoryFromProtoTyped( + const envoy::extensions::filters::http::sxg::v3alpha::SXG& proto_config, + const std::string& stat_prefix, Server::Configuration::FactoryContext& context) { + const auto& certificate = proto_config.certificate(); + const auto& private_key = proto_config.private_key(); + + auto& cluster_manager = context.clusterManager(); + auto& secret_manager = cluster_manager.clusterManagerFactory().secretManager(); + auto& transport_socket_factory = context.getTransportSocketFactoryContext(); + auto secret_provider_certificate = + secretsProvider(certificate, secret_manager, transport_socket_factory); + if (secret_provider_certificate == nullptr) { + throw EnvoyException("invalid certificate secret configuration"); + } + auto secret_provider_private_key = + secretsProvider(private_key, secret_manager, transport_socket_factory); + if (secret_provider_private_key == nullptr) { + throw EnvoyException("invalid private_key secret configuration"); + } + + auto secret_reader = std::make_shared( + secret_provider_certificate, secret_provider_private_key, context.api()); + auto config = std::make_shared(proto_config, context.timeSource(), secret_reader, + stat_prefix, context.scope()); + return [config](Http::FilterChainFactoryCallbacks& callbacks) -> void { + const EncoderPtr encoder = std::make_unique(config); + callbacks.addStreamFilter(std::make_shared(config, encoder)); + }; +} + +REGISTER_FACTORY(FilterFactory, Server::Configuration::NamedHttpFilterConfigFactory); + +} // namespace SXG +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff 
--git a/contrib/sxg/filters/http/source/config.h b/contrib/sxg/filters/http/source/config.h new file mode 100644 index 0000000000000..1d71843641c18 --- /dev/null +++ b/contrib/sxg/filters/http/source/config.h @@ -0,0 +1,31 @@ +#pragma once + +#include + +#include "source/extensions/filters/http/common/factory_base.h" + +#include "contrib/envoy/extensions/filters/http/sxg/v3alpha/sxg.pb.h" +#include "contrib/envoy/extensions/filters/http/sxg/v3alpha/sxg.pb.validate.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace SXG { + +class FilterFactory : public Extensions::HttpFilters::Common::FactoryBase< + envoy::extensions::filters::http::sxg::v3alpha::SXG> { +public: + FilterFactory() : FactoryBase("envoy.filters.http.sxg") {} + +private: + Http::FilterFactoryCb createFilterFactoryFromProtoTyped( + const envoy::extensions::filters::http::sxg::v3alpha::SXG& config, + const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override; +}; + +DECLARE_FACTORY(FilterFactory); + +} // namespace SXG +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sxg/filters/http/source/encoder.cc b/contrib/sxg/filters/http/source/encoder.cc new file mode 100644 index 0000000000000..924ea3dfebc39 --- /dev/null +++ b/contrib/sxg/filters/http/source/encoder.cc @@ -0,0 +1,243 @@ +#include "contrib/sxg/filters/http/source/encoder.h" + +#include +#include +#include +#include + +#include + +#include "source/common/buffer/buffer_impl.h" +#include "source/common/http/headers.h" + +#include "absl/strings/escaping.h" +#include "contrib/sxg/filters/http/source/filter_config.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace SXG { + +EncoderImpl::~EncoderImpl() { + sxg_header_release(&headers_); + sxg_raw_response_release(&raw_response_); + sxg_signer_list_release(&signer_list_); + sxg_encoded_response_release(&encoded_response_); +} + +void EncoderImpl::setOrigin(const 
std::string origin) { origin_ = origin; }; + +void EncoderImpl::setUrl(const std::string url) { url_ = url; }; + +bool EncoderImpl::loadHeaders(Http::ResponseHeaderMap* headers) { + const auto& filtered_headers = filteredResponseHeaders(); + bool retval = true; + headers->iterate([this, filtered_headers, + &retval](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + const auto& header_key = header.key().getStringView(); + + // filter x-envoy-* headers + if (absl::StartsWith(header_key, ThreadSafeSingleton::get().prefix())) { + return Http::HeaderMap::Iterate::Continue; + } + // filter out the header that we use as a flag to trigger encoding + if (config_->shouldEncodeSXGHeader().get() == header_key) { + return Http::HeaderMap::Iterate::Continue; + } + // filter out other headers by prefix + for (const auto& prefix_filter : config_->headerPrefixFilters()) { + if (absl::StartsWith(header_key, prefix_filter)) { + return Http::HeaderMap::Iterate::Continue; + } + } + // filter out headers that are not allowed to be encoded in the SXG document + if (filtered_headers.find(header_key) != filtered_headers.end()) { + return Http::HeaderMap::Iterate::Continue; + } + + const auto header_value = header.value().getStringView(); + if (!sxg_header_append_string(std::string(header_key).c_str(), + std::string(header_value).c_str(), &headers_)) { + retval = false; + return Http::HeaderMap::Iterate::Break; + } + return Http::HeaderMap::Iterate::Continue; + }); + + return retval; +} + +bool EncoderImpl::loadContent(Buffer::Instance& data) { + const size_t size = data.length(); + if (!sxg_buffer_resize(size, &raw_response_.payload)) { + return false; + } + data.copyOut(0, size, raw_response_.payload.data); + + return true; +} + +constexpr uint64_t ONE_DAY_IN_SECONDS = 86400L; + +bool EncoderImpl::loadSigner() { + // backdate timestamp by 1 day, to account for clock skew + const uint64_t date = getTimestamp() - ONE_DAY_IN_SECONDS; + + const uint64_t expires = date + 
static_cast<uint64_t>(config_->duration());
sxg_buffer_release(const_cast<sxg_buffer_t*>(&result)); + delete this_fragment; + }); +} + +uint64_t EncoderImpl::getTimestamp() { + const auto now = config_->timeSource().systemTime(); + const auto ts = std::abs(static_cast<int64_t>( + std::chrono::duration_cast<std::chrono::seconds>(now.time_since_epoch()).count()));
+const EncoderImpl::HeaderFilterSet& EncoderImpl::filteredResponseHeaders() const { + CONSTRUCT_ON_FIRST_USE( + HeaderFilterSet, + { + // handled by libsxg, or explicitly by this filter + ":status", + // hop-by-hop headers, see: + // https://tools.ietf.org/id/draft-yasskin-http-origin-signed-responses-05.html#uncached-headers + "connection", + "keep-alive", + "proxy-connection", + "trailer", + "transfer-encoding", + "upgrade", + // Stateful headers, see: + // https://tools.ietf.org/id/draft-yasskin-http-origin-signed-responses-05.html#stateful-headers + // and blocked in http://crrev.com/c/958945. + "authentication-control", + "authentication-info", + "clear-site-data", + "optional-www-authenticate", + "proxy-authenticate", + "proxy-authentication-info", + "public-key-pins", + "sec-websocket-accept", + "set-cookie", + "set-cookie2", + "setprofile", + "strict-transport-security", + "www-authenticate", + // other stateful headers + "vary", + "cache-control", + }); +} + +} // namespace SXG +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sxg/filters/http/source/encoder.h b/contrib/sxg/filters/http/source/encoder.h new file mode 100644 index 0000000000000..a43aeff243aeb --- /dev/null +++ b/contrib/sxg/filters/http/source/encoder.h @@ -0,0 +1,83 @@ +#pragma once + +#include "source/common/buffer/buffer_impl.h" +#include "source/common/config/datasource.h" + +#include "contrib/sxg/filters/http/source/filter_config.h" +#include "libsxg.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace SXG { + +/** + * Helper type to facilitate comparing an absl::string_view key to a std::string. 
+ */ +struct StringCmp { + using IsTransparent = void; + bool operator()(absl::string_view a, absl::string_view b) const { return a < b; } +}; + +class Encoder { +public: + virtual ~Encoder() = default; + + virtual void setOrigin(const std::string origin) PURE; + virtual void setUrl(const std::string url) PURE; + virtual bool loadSigner() PURE; + virtual bool loadHeaders(Http::ResponseHeaderMap* headers) PURE; + virtual bool loadContent(Buffer::Instance& data) PURE; + virtual bool getEncodedResponse() PURE; + virtual Buffer::BufferFragment* writeSxg() PURE; +}; + +using EncoderPtr = std::unique_ptr; + +class EncoderImpl : public Encoder, Logger::Loggable { +public: + explicit EncoderImpl(const FilterConfigSharedPtr& config) + : headers_(sxg_empty_header()), raw_response_(sxg_empty_raw_response()), + signer_list_(sxg_empty_signer_list()), encoded_response_(sxg_empty_encoded_response()), + config_(config) {} + + ~EncoderImpl() override; + + // Filter::Encoder + void setOrigin(const std::string origin) override; + void setUrl(const std::string url) override; + bool loadHeaders(Http::ResponseHeaderMap* headers) override; + bool loadSigner() override; + bool loadContent(Buffer::Instance& data) override; + bool getEncodedResponse() override; + Buffer::BufferFragment* writeSxg() override; + +private: + friend class EncoderTest; + + sxg_header_t headers_; + sxg_raw_response_t raw_response_; + sxg_signer_list_t signer_list_; + sxg_encoded_response_t encoded_response_; + FilterConfigSharedPtr config_; + std::string origin_; + std::string url_; + + uint64_t getTimestamp(); + const std::string toAbsolute(const std::string& url_or_relative_path) const; + const std::string getCborUrl(const std::string& cert_digest) const; + const std::string getValidityUrl() const; + + X509* loadX09Cert(); + EVP_PKEY* loadPrivateKey(); + const std::string& sxgSigLabel() const; + const std::string generateCertDigest(X509* cert) const; + + using HeaderFilterSet = std::set; + const HeaderFilterSet& 
filteredResponseHeaders() const; +}; + +} // namespace SXG +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sxg/filters/http/source/filter.cc b/contrib/sxg/filters/http/source/filter.cc new file mode 100644 index 0000000000000..7dc0d36d31520 --- /dev/null +++ b/contrib/sxg/filters/http/source/filter.cc @@ -0,0 +1,231 @@ +#include "contrib/sxg/filters/http/source/filter.h" + +#include + +#include "envoy/http/codes.h" +#include "envoy/stats/scope.h" + +#include "source/common/common/utility.h" +#include "source/common/http/headers.h" +#include "source/common/stats/utility.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace SXG { + +Http::RegisterCustomInlineHeader + accept_handle(Http::CustomHeaders::get().Accept); + +Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, bool) { + ENVOY_LOG(debug, "sxg filter from decodeHeaders: {}", headers); + if (headers.Host() && headers.Path() && clientAcceptSXG(headers)) { + client_accept_sxg_ = true; + headers.setReference(xCanAcceptSxgKey(), xCanAcceptSxgValue()); + auto origin = fmt::format("https://{}", headers.getHostValue()); + auto url = fmt::format("{}{}", origin, headers.getPathValue()); + encoder_->setOrigin(origin); + encoder_->setUrl(url); + config_->stats().total_client_can_accept_sxg_.inc(); + } + return Http::FilterHeadersStatus::Continue; +} + +void Filter::setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) { + decoder_callbacks_ = &callbacks; +} + +Http::FilterHeadersStatus Filter::encodeHeaders(Http::ResponseHeaderMap& headers, bool) { + ENVOY_LOG(debug, "sxg filter from Filter::encodeHeaders"); + + if (client_accept_sxg_ && shouldEncodeSXG(headers)) { + response_headers_ = &headers; + should_encode_sxg_ = true; + config_->stats().total_should_sign_.inc(); + return Http::FilterHeadersStatus::StopIteration; + } + + return Http::FilterHeadersStatus::Continue; +} + 
+Http::FilterDataStatus Filter::encodeData(Buffer::Instance& data, bool end_stream) { + ENVOY_LOG(debug, "sxg filter from encodeData end_stream: {}", end_stream); + + if (!should_encode_sxg_) { + return Http::FilterDataStatus::Continue; + } + + data_total_ += data.length(); + if (encoderBufferLimitReached(data_total_)) { + should_encode_sxg_ = false; + return Http::FilterDataStatus::Continue; + } + + encoder_callbacks_->addEncodedData(data, false); + + if (!end_stream) { + // We need to know the size of the response in order to generate the SXG, so we wait. + return Http::FilterDataStatus::StopIterationAndBuffer; + } + + doSxg(); + return Http::FilterDataStatus::Continue; +} + +Http::FilterTrailersStatus Filter::encodeTrailers(Http::ResponseTrailerMap&) { + if (should_encode_sxg_) { + doSxg(); + } + return Http::FilterTrailersStatus::Continue; +} + +void Filter::doSxg() { + if (finished_) { + return; + } + + finished_ = true; + + encoder_callbacks_->modifyEncodingBuffer([this](Buffer::Instance& enc_buf) { + config_->stats().total_signed_attempts_.inc(); + + if (!encoder_->loadHeaders(response_headers_)) { + config_->stats().total_signed_failed_.inc(); + return; + } + + if (!encoder_->loadContent(enc_buf)) { + config_->stats().total_signed_failed_.inc(); + return; + } + + if (!encoder_->getEncodedResponse()) { + config_->stats().total_signed_failed_.inc(); + return; + } + + if (!encoder_->loadSigner()) { + config_->stats().total_signed_failed_.inc(); + return; + } + + auto output = encoder_->writeSxg(); + if (!output) { + config_->stats().total_signed_failed_.inc(); + return; + } + + // Make sure that the resulting SXG isn't too big before adding it to the encoding + // buffer. Note that since the buffer fragment hasn't been added to the enc_buf + // yet, we need to call done() directly. 
+ if (encoderBufferLimitReached(output->size() + 100)) { + output->done(); + config_->stats().total_signed_failed_.inc(); + return; + } + + enc_buf.drain(enc_buf.length()); + enc_buf.addBufferFragment(*output); + + response_headers_->setContentLength(enc_buf.length()); + response_headers_->setContentType(sxgContentType()); + + config_->stats().total_signed_succeeded_.inc(); + }); +} + +void Filter::setEncoderFilterCallbacks(Http::StreamEncoderFilterCallbacks& callbacks) { + encoder_callbacks_ = &callbacks; +} + +bool Filter::clientAcceptSXG(const Http::RequestHeaderMap& headers) { + const absl::string_view accept = headers.getInlineValue(accept_handle.handle()); + + absl::string_view html_q_value = "0"; + absl::string_view sxg_q_value = ""; + // Client can accept signed exchange if accept header has: + // a) application/signed-exchange + // b) with appropriate version (v=b3) + // c) q-value of signed exchange is >= that of text/html + // from: https://web.dev/signed-exchanges/#best-practices + for (const auto& token : StringUtil::splitToken(accept, ",")) { + const auto& type = StringUtil::trim(StringUtil::cropRight(token, ";")); + absl::string_view q_value = "1"; + absl::string_view version = ""; + + const auto params = StringUtil::cropLeft(token, ";"); + for (const auto& param : StringUtil::splitToken(params, ";")) { + if (absl::EqualsIgnoreCase("q", StringUtil::trim(StringUtil::cropRight(param, "=")))) { + q_value = StringUtil::trim(StringUtil::cropLeft(param, "=")); + } + if (absl::EqualsIgnoreCase("v", StringUtil::trim(StringUtil::cropRight(param, "=")))) { + version = StringUtil::trim(StringUtil::cropLeft(param, "=")); + } + } + + if (type == sxgContentTypeUnversioned() && version == acceptedSxgVersion()) { + sxg_q_value = q_value; + } else if (type == htmlContentType()) { + html_q_value = q_value; + } + } + + return sxg_q_value.compare(html_q_value) >= 0; +} + +bool Filter::shouldEncodeSXG(const Http::ResponseHeaderMap& headers) { + if (!(headers.Status() && 
headers.getStatusValue() == "200")) { + return false; + } + + const auto x_should_encode_sxg_header = headers.get(xShouldEncodeSxgKey()); + return !x_should_encode_sxg_header.empty(); +} + +bool Filter::encoderBufferLimitReached(uint64_t buffer_length) { + const auto limit = encoder_callbacks_->encoderBufferLimit(); + const auto header_size = response_headers_->byteSize(); + + ENVOY_LOG(debug, + "Envoy::Extensions::HttpFilters::SXG::Filter::encoderBufferLimitReached limit: {}, " + "header_size: {} buffer_length: {}", + limit, header_size, buffer_length); + + // note that a value of 0 indicates that no limits are enforced + if (limit && header_size + buffer_length > limit) { + config_->stats().total_exceeded_max_payload_size_.inc(); + return true; + } + return false; +} + +const Http::LowerCaseString& Filter::xCanAcceptSxgKey() const { + return config_->clientCanAcceptSXGHeader(); +} + +const std::string& Filter::xCanAcceptSxgValue() const { + CONSTRUCT_ON_FIRST_USE(std::string, "true"); +} + +const Http::LowerCaseString& Filter::xShouldEncodeSxgKey() const { + return config_->shouldEncodeSXGHeader(); +} + +const std::string& Filter::htmlContentType() const { + CONSTRUCT_ON_FIRST_USE(std::string, "text/html"); +} + +const std::string& Filter::sxgContentTypeUnversioned() const { + CONSTRUCT_ON_FIRST_USE(std::string, "application/signed-exchange"); +} + +const std::string& Filter::acceptedSxgVersion() const { CONSTRUCT_ON_FIRST_USE(std::string, "b3"); } + +const std::string& Filter::sxgContentType() const { + CONSTRUCT_ON_FIRST_USE(std::string, "application/signed-exchange;v=b3"); +} + +} // namespace SXG +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sxg/filters/http/source/filter.h b/contrib/sxg/filters/http/source/filter.h new file mode 100644 index 0000000000000..c58c7e66cee6a --- /dev/null +++ b/contrib/sxg/filters/http/source/filter.h @@ -0,0 +1,72 @@ +#pragma once + +#include "envoy/stats/scope.h" + +#include 
"source/extensions/filters/http/common/pass_through_filter.h" + +#include "contrib/envoy/extensions/filters/http/sxg/v3alpha/sxg.pb.h" +#include "contrib/sxg/filters/http/source/encoder.h" +#include "contrib/sxg/filters/http/source/filter_config.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace SXG { + +/** + * Transaction flow: + * 1. check accept request header for whether client can accept sxg + * 2. check response headers for flag to indicate whether downstream wants SXG encoding + * 3. if both true, buffer response body until stream end and then run through the libsxg encoder + * thingy + * + */ +class Filter : public Http::PassThroughFilter, Logger::Loggable { +public: + Filter(const FilterConfigSharedPtr& config, const EncoderPtr& encoder) + : config_(config), encoder_(encoder) {} + + // Http::StreamDecoderFilter + Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap&, bool end_stream) override; + void setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks&) override; + + // Http::StreamEncodeFilter + Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap&, bool end_stream) override; + Http::FilterDataStatus encodeData(Buffer::Instance& data, bool end_stream) override; + Http::FilterTrailersStatus encodeTrailers(Http::ResponseTrailerMap&) override; + void setEncoderFilterCallbacks(Http::StreamEncoderFilterCallbacks& callbacks) override; + +private: + friend class FilterTest; + + bool client_accept_sxg_{false}; + bool should_encode_sxg_{false}; + std::shared_ptr config_; + Http::ResponseHeaderMap* response_headers_; + uint64_t data_total_{0}; + bool finished_{false}; + const EncoderPtr& encoder_; + + Http::StreamDecoderFilterCallbacks* decoder_callbacks_; + Http::StreamEncoderFilterCallbacks* encoder_callbacks_; + + void doSxg(); + + const absl::string_view urlStripQueryFragment(absl::string_view path) const; + + bool clientAcceptSXG(const Http::RequestHeaderMap& headers); + bool shouldEncodeSXG(const 
Http::ResponseHeaderMap& headers); + bool encoderBufferLimitReached(uint64_t buffer_length); + const Http::LowerCaseString& xCanAcceptSxgKey() const; + const std::string& xCanAcceptSxgValue() const; + const Http::LowerCaseString& xShouldEncodeSxgKey() const; + const std::string& htmlContentType() const; + const std::string& sxgContentTypeUnversioned() const; + const std::string& acceptedSxgVersion() const; + const std::string& sxgContentType() const; +}; + +} // namespace SXG +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sxg/filters/http/source/filter_config.cc b/contrib/sxg/filters/http/source/filter_config.cc new file mode 100644 index 0000000000000..8d0fb33600175 --- /dev/null +++ b/contrib/sxg/filters/http/source/filter_config.cc @@ -0,0 +1,49 @@ +#include "contrib/sxg/filters/http/source/filter_config.h" + +#include + +#include "envoy/http/codes.h" +#include "envoy/server/filter_config.h" +#include "envoy/stats/scope.h" + +#include "source/common/common/utility.h" +#include "source/common/http/headers.h" +#include "source/common/stats/utility.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace SXG { + +template +const std::vector initializeHeaderPrefixFilters(const T& filters_proto) { + std::vector filters; + filters.reserve(filters_proto.size()); + + for (const auto& filter : filters_proto) { + filters.emplace_back(filter); + } + + return filters; +} + +FilterConfig::FilterConfig(const envoy::extensions::filters::http::sxg::v3alpha::SXG& proto_config, + TimeSource& time_source, std::shared_ptr secret_reader, + const std::string& stat_prefix, Stats::Scope& scope) + : stats_(generateStats(stat_prefix + "sxg.", scope)), + duration_(proto_config.has_duration() ? proto_config.duration().seconds() : 604800UL), + cbor_url_(proto_config.cbor_url()), validity_url_(proto_config.validity_url()), + mi_record_size_(proto_config.mi_record_size() ? 
proto_config.mi_record_size() : 4096L), + client_can_accept_sxg_header_(proto_config.client_can_accept_sxg_header().length() > 0 + ? proto_config.client_can_accept_sxg_header() + : "x-client-can-accept-sxg"), + should_encode_sxg_header_(proto_config.should_encode_sxg_header().length() > 0 + ? proto_config.should_encode_sxg_header() + : "x-should-encode-sxg"), + header_prefix_filters_(initializeHeaderPrefixFilters(proto_config.header_prefix_filters())), + time_source_(time_source), secret_reader_(secret_reader) {} + +} // namespace SXG +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sxg/filters/http/source/filter_config.h b/contrib/sxg/filters/http/source/filter_config.h new file mode 100644 index 0000000000000..11532d2e21454 --- /dev/null +++ b/contrib/sxg/filters/http/source/filter_config.h @@ -0,0 +1,120 @@ +#pragma once + +#include "envoy/server/filter_config.h" +#include "envoy/stats/scope.h" +#include "envoy/stats/stats_macros.h" + +#include "source/common/config/datasource.h" +#include "source/extensions/filters/http/common/pass_through_filter.h" + +#include "contrib/envoy/extensions/filters/http/sxg/v3alpha/sxg.pb.h" +#include "contrib/envoy/extensions/filters/http/sxg/v3alpha/sxg.pb.validate.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace SXG { + +#define ALL_SXG_STATS(COUNTER) \ + COUNTER(total_client_can_accept_sxg) \ + COUNTER(total_should_sign) \ + COUNTER(total_exceeded_max_payload_size) \ + COUNTER(total_signed_attempts) \ + COUNTER(total_signed_succeeded) \ + COUNTER(total_signed_failed) + +struct SignedExchangeStats { + ALL_SXG_STATS(GENERATE_COUNTER_STRUCT) +}; + +// Helper class used to fetch secrets (usually from SDS). 
+class SecretReader { +public: + virtual ~SecretReader() = default; + virtual const std::string& certificate() const PURE; + virtual const std::string& privateKey() const PURE; +}; + +class SDSSecretReader : public SecretReader { +public: + SDSSecretReader(Secret::GenericSecretConfigProviderSharedPtr certificate_provider, + Secret::GenericSecretConfigProviderSharedPtr private_key_provider, Api::Api& api) + : update_callback_client_(readAndWatchSecret(certificate_, certificate_provider, api)), + update_callback_token_(readAndWatchSecret(private_key_, private_key_provider, api)) {} + + // SecretReader + const std::string& certificate() const override { return certificate_; } + const std::string& privateKey() const override { return private_key_; } + +private: + Envoy::Common::CallbackHandlePtr + readAndWatchSecret(std::string& value, + Secret::GenericSecretConfigProviderSharedPtr& secret_provider, Api::Api& api) { + const auto* secret = secret_provider->secret(); + if (secret != nullptr) { + value = Config::DataSource::read(secret->secret(), true, api); + } + + return secret_provider->addUpdateCallback([secret_provider, &api, &value]() { + const auto* secret = secret_provider->secret(); + if (secret != nullptr) { + value = Config::DataSource::read(secret->secret(), true, api); + } + }); + } + + std::string certificate_; + std::string private_key_; + + Envoy::Common::CallbackHandlePtr update_callback_client_; + Envoy::Common::CallbackHandlePtr update_callback_token_; +}; + +class FilterConfig : public Logger::Loggable { +public: + FilterConfig(const envoy::extensions::filters::http::sxg::v3alpha::SXG& proto_config, + TimeSource& time_source, std::shared_ptr secret_reader, + const std::string& stat_prefix, Stats::Scope&); + ~FilterConfig() = default; + + const SignedExchangeStats stats() { return stats_; }; + + long duration() const { return duration_; }; + long miRecordSize() const { return mi_record_size_; }; + const std::string& cborUrl() const { return cbor_url_; 
}; + const std::string& validityUrl() const { return validity_url_; }; + TimeSource& timeSource() { return time_source_; }; + const Http::LowerCaseString& clientCanAcceptSXGHeader() { return client_can_accept_sxg_header_; } + const Http::LowerCaseString& shouldEncodeSXGHeader() { return should_encode_sxg_header_; } + const std::vector& headerPrefixFilters() { return header_prefix_filters_; } + + const std::string& certificate() const { return secret_reader_->certificate(); } + const std::string& privateKey() const { return secret_reader_->privateKey(); } + +private: + static SignedExchangeStats generateStats(const std::string& prefix, Stats::Scope& scope) { + return SignedExchangeStats{ALL_SXG_STATS(POOL_COUNTER_PREFIX(scope, prefix))}; + } + + SignedExchangeStats stats_; + + const long duration_; + const std::string cbor_url_; + const std::string validity_url_; + const long mi_record_size_; + const Http::LowerCaseString client_can_accept_sxg_header_; + const Http::LowerCaseString should_encode_sxg_header_; + const std::vector header_prefix_filters_; + + TimeSource& time_source_; + const std::shared_ptr secret_reader_; + const std::string certificate_identifier_; + const std::string private_key_identifier_; +}; + +using FilterConfigSharedPtr = std::shared_ptr; + +} // namespace SXG +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sxg/filters/http/test/BUILD b/contrib/sxg/filters/http/test/BUILD new file mode 100644 index 0000000000000..62df2d888c329 --- /dev/null +++ b/contrib/sxg/filters/http/test/BUILD @@ -0,0 +1,41 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test", + "envoy_contrib_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_contrib_package() + +envoy_cc_test( + name = "filter_test", + srcs = ["filter_test.cc"], + tags = ["skip_on_windows"], + deps = [ + "//contrib/sxg/filters/http/source:config", + "//contrib/sxg/filters/http/source:sxg_lib", + 
"//source/common/secret:secret_manager_impl_lib", + "//source/extensions/filters/http/common:pass_through_filter_lib", + "//test/integration:http_integration_lib", + "//test/mocks/server:server_mocks", + "//test/mocks/upstream:upstream_mocks", + "@envoy_api//contrib/envoy/extensions/filters/http/sxg/v3alpha:pkg_cc_proto", + ], +) + +envoy_cc_test( + name = "config_test", + srcs = ["config_test.cc"], + tags = ["skip_on_windows"], + deps = [ + "//contrib/sxg/filters/http/source:config", + "//contrib/sxg/filters/http/source:sxg_lib", + "//source/common/secret:secret_manager_impl_lib", + "//source/extensions/filters/http/common:pass_through_filter_lib", + "//test/integration:http_integration_lib", + "//test/mocks/server:server_mocks", + "//test/mocks/upstream:upstream_mocks", + "@envoy_api//contrib/envoy/extensions/filters/http/sxg/v3alpha:pkg_cc_proto", + ], +) diff --git a/contrib/sxg/filters/http/test/config_test.cc b/contrib/sxg/filters/http/test/config_test.cc new file mode 100644 index 0000000000000..1fa1fbd354c37 --- /dev/null +++ b/contrib/sxg/filters/http/test/config_test.cc @@ -0,0 +1,122 @@ +#include +#include + +#include "source/common/protobuf/message_validator_impl.h" +#include "source/common/protobuf/utility.h" +#include "source/common/secret/secret_provider_impl.h" + +#include "test/mocks/server/factory_context.h" + +#include "contrib/envoy/extensions/filters/http/sxg/v3alpha/sxg.pb.h" +#include "contrib/sxg/filters/http/source/config.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace SXG { + +using testing::NiceMock; +using testing::Return; + +namespace { + +void expectCreateFilter(std::string yaml, bool is_sds_config) { + FilterFactory factory; + ProtobufTypes::MessagePtr proto_config = factory.createEmptyConfigProto(); + TestUtility::loadFromYaml(yaml, *proto_config); + Server::Configuration::MockFactoryContext context; + 
context.cluster_manager_.initializeClusters({"foo"}, {}); + + // This returns non-nullptr for certificate and private_key. + auto& secret_manager = context.cluster_manager_.cluster_manager_factory_.secretManager(); + if (is_sds_config) { + ON_CALL(secret_manager, findOrCreateGenericSecretProvider(_, _, _)) + .WillByDefault(Return(std::make_shared( + envoy::extensions::transport_sockets::tls::v3::GenericSecret()))); + } else { + ON_CALL(secret_manager, findStaticGenericSecretProvider(_)) + .WillByDefault(Return(std::make_shared( + envoy::extensions::transport_sockets::tls::v3::GenericSecret()))); + } + EXPECT_CALL(context, messageValidationVisitor()); + EXPECT_CALL(context, clusterManager()); + EXPECT_CALL(context, scope()); + EXPECT_CALL(context, timeSource()); + EXPECT_CALL(context, api()); + EXPECT_CALL(context, getTransportSocketFactoryContext()); + Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(*proto_config, "stats", context); + Http::MockFilterChainFactoryCallbacks filter_callback; + EXPECT_CALL(filter_callback, addStreamFilter(_)); + cb(filter_callback); +} + +// This loads one of the secrets in credentials, and fails the other one. +void expectInvalidSecretConfig(const std::string& failed_secret_name, + const std::string& exception_message) { + const std::string yaml = R"YAML( +certificate: + name: certificate +private_key: + name: private_key +cbor_url: "/.sxg/cert.cbor" +validity_url: "/.sxg/validity.msg" +)YAML"; + + FilterFactory factory; + ProtobufTypes::MessagePtr proto_config = factory.createEmptyConfigProto(); + TestUtility::loadFromYaml(yaml, *proto_config); + NiceMock context; + + auto& secret_manager = context.cluster_manager_.cluster_manager_factory_.secretManager(); + ON_CALL(secret_manager, findStaticGenericSecretProvider( + failed_secret_name == "private_key" ? 
"certificate" : "private_key")) + .WillByDefault(Return(std::make_shared( + envoy::extensions::transport_sockets::tls::v3::GenericSecret()))); + + EXPECT_THROW_WITH_MESSAGE(factory.createFilterFactoryFromProto(*proto_config, "stats", context), + EnvoyException, exception_message); +} + +} // namespace + +TEST(ConfigTest, CreateFilterStaticSecretProvider) { + const std::string yaml = R"YAML( +cbor_url: "/.sxg/cert.cbor" +validity_url: "/.sxg/validity.msg" +)YAML"; + expectCreateFilter(yaml, false); +} + +TEST(ConfigTest, CreateFilterHasSdsSecret) { + const std::string yaml = R"YAML( +certificate: + name: certificate + sds_config: + path: "xxxx" + resource_api_version: V3 +private_key: + name: private_key + sds_config: + path: "xxxx" + resource_api_version: V3 +cbor_url: "/.sxg/cert.cbor" +validity_url: "/.sxg/validity.msg" +)YAML"; + + expectCreateFilter(yaml, true); +} + +TEST(ConfigTest, InvalidCertificateSecret) { + expectInvalidSecretConfig("certificate", "invalid certificate secret configuration"); +} + +TEST(ConfigTest, InvalidPrivateKeySecret) { + expectInvalidSecretConfig("private_key", "invalid private_key secret configuration"); +} + +} // namespace SXG +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/sxg/filters/http/test/filter_test.cc b/contrib/sxg/filters/http/test/filter_test.cc new file mode 100644 index 0000000000000..0ec8fedcdd5fe --- /dev/null +++ b/contrib/sxg/filters/http/test/filter_test.cc @@ -0,0 +1,1032 @@ +#include + +#include "envoy/stats/stats.h" + +#include "source/common/secret/secret_manager_impl.h" + +#include "test/mocks/http/mocks.h" +#include "test/mocks/server/mocks.h" +#include "test/test_common/simulated_time_system.h" +#include "test/test_common/utility.h" + +#include "contrib/envoy/extensions/filters/http/sxg/v3alpha/sxg.pb.h" +#include "contrib/envoy/extensions/filters/http/sxg/v3alpha/sxg.pb.validate.h" +#include "contrib/sxg/filters/http/source/filter.h" +#include 
"gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace SXG { + +using testing::_; +using testing::NiceMock; +using testing::Return; +using testing::ReturnRef; + +class MockSecretReader : public SecretReader { +public: + MockSecretReader(const std::string& certificate, const std::string& private_key) + : certificate_(certificate), private_key_(private_key){}; + + const std::string& certificate() const override { return certificate_; } + const std::string& privateKey() const override { return private_key_; } + +private: + const std::string certificate_; + const std::string private_key_; +}; + +class MockEncoder : public Encoder { +public: + MOCK_METHOD(void, setOrigin, (const std::string), (override)); + MOCK_METHOD(void, setUrl, (const std::string), (override)); + MOCK_METHOD(bool, loadSigner, (), (override)); + MOCK_METHOD(bool, loadHeaders, (Http::ResponseHeaderMap*), (override)); + MOCK_METHOD(bool, loadContent, (Buffer::Instance&), (override)); + MOCK_METHOD(bool, getEncodedResponse, (), (override)); + MOCK_METHOD(Buffer::BufferFragment*, writeSxg, (), (override)); +}; + +int extractIntFromBytes(std::string bytes, size_t offset, size_t size) { + if (size <= 0 || size > 8 || bytes.size() < offset + size) { + return 0; + } + int value = 0; + for (size_t i = 0; i < size; i++) { + value <<= 8; + value |= (0xff & bytes[offset + i]); + } + return value; +} + +bool writeIntToBytes(std::string& bytes, uint64_t int_to_write, size_t offset, size_t size) { + if (size <= 0 || size > 8 || bytes.size() < offset + size) { + return false; + } + for (int i = size - 1; i >= 0; i--) { + char byte = 0xff & int_to_write; + bytes[offset + i] = byte; + int_to_write >>= 8; + } + return true; +} + +// The sig value of the SXG document is unique, so we strip it in tests +bool clearSignature(std::string& buffer) { + if (buffer.find("sxg1-b3", 0, 7) == std::string::npos) { + return false; + } + if (buffer[7] != '\0') { + 
return false; + } + + // The fallback URL length is contained in the 2 bytes following the sxg-b3 + // prefix string and the nullptr byte that follows. We need to know this length + // because the signature length is located after the fallback URL. + size_t fallback_url_size_offset = 8; + size_t fallback_url_size = extractIntFromBytes(buffer, fallback_url_size_offset, 2); + + // the signature length is contained in the 3 bytes following the fallback URL + size_t sig_size_offset = fallback_url_size_offset + 2 + fallback_url_size; + size_t sig_size = extractIntFromBytes(buffer, sig_size_offset, 3); + + const size_t sig_pos = buffer.find("sig=*"); + if (sig_pos == std::string::npos) { + return false; + } + + const size_t start = sig_pos + 5; + const size_t len = buffer.find('*', start) - start; + + // decrement the sig_size in the SXG document by the calculated length + const size_t modified_sig_size = sig_size - len; + if (!writeIntToBytes(buffer, modified_sig_size, sig_size_offset, 3)) { + return false; + } + + // replace the signature piece with empty string + buffer.erase(start, len); + + return true; +} + +class FilterTest : public testing::Test { +public: + FilterTest() = default; + + void setConfiguration() { + std::string config_str(R"YAML( +cbor_url: "/.sxg/cert.cbor" +validity_url: "/.sxg/validity.msg" +)YAML"); + setConfiguration(config_str); + } + + void setConfiguration(const std::string& config_str) { + std::string certificate(R"PEM( +-----BEGIN CERTIFICATE----- +MIIBhjCCASygAwIBAgIJAIH9REPqIFXTMAkGByqGSM49BAEwMjEUMBIGA1UEAwwL +ZXhhbXBsZS5vcmcxDTALBgNVBAoMBFRlc3QxCzAJBgNVBAYTAlVTMB4XDTIxMDEx +MzAxMDcwMVoXDTIxMDQxMzAxMDcwMVowMjEUMBIGA1UEAwwLZXhhbXBsZS5vcmcx +DTALBgNVBAoMBFRlc3QxCzAJBgNVBAYTAlVTMFkwEwYHKoZIzj0CAQYIKoZIzj0D +AQcDQgAE4ZrHsGLEiP+pV70a8zIERNcu9MBJHHfbeqLUqwGWWU2/YHObf58nE9to +c6lgrko2JdbV6TyWLVUc/M0Pn+OVSaMsMCowEAYKKwYBBAHWeQIBFgQCBQAwFgYD +VR0RBA8wDYILZXhhbXBsZS5vcmcwCQYHKoZIzj0EAQNJADBGAiEAuQJjX+z7j4hR 
+xtxfs4VPY5RsF5Sawd+mtluRxpoURcsCIQCIGU/11jcuS0UbIpt4B5Gb1UJlSKGi +Dgu+2OKt7qVPrA== +-----END CERTIFICATE----- +)PEM"); + std::string private_key(R"PEM( +-----BEGIN EC PARAMETERS----- +BggqhkjOPQMBBw== +-----END EC PARAMETERS----- +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIJyGXecxIQtBwBJWU4Sc5A8UHNt5HnOBR9Oh11AGYa/2oAoGCCqGSM49 +AwEHoUQDQgAE4ZrHsGLEiP+pV70a8zIERNcu9MBJHHfbeqLUqwGWWU2/YHObf58n +E9toc6lgrko2JdbV6TyWLVUc/M0Pn+OVSQ== +-----END EC PRIVATE KEY----- +)PEM"); + + setConfiguration(config_str, certificate, private_key); + } + + void setConfiguration(const std::string& config_str, const std::string& certificate, + const std::string& private_key) { + envoy::extensions::filters::http::sxg::v3alpha::SXG proto; + TestUtility::loadFromYaml(config_str, proto); + + time_system_.setSystemTime(std::chrono::seconds(1610503040)); + + auto secret_reader = std::make_shared(certificate, private_key); + config_ = std::make_shared(proto, time_system_, secret_reader, "", scope_); + } + + void setFilter() { + if (encoder_ == nullptr) { + encoder_ = std::make_unique(config_); + } + setFilter(std::make_shared(config_, encoder_)); + } + + void setFilter(std::shared_ptr filter) { + filter_ = filter; + filter_->setDecoderFilterCallbacks(decoder_callbacks_); + filter_->setEncoderFilterCallbacks(encoder_callbacks_); + } + + void testPassthroughHtml(Http::TestRequestHeaderMapImpl& request_headers, + Http::TestResponseHeaderMapImpl& response_headers, + bool client_can_accept_sxg) { + testPassthroughHtml(request_headers, response_headers, nullptr, client_can_accept_sxg); + } + + void testPassthroughHtml(Http::TestRequestHeaderMapImpl& request_headers, + Http::TestResponseHeaderMapImpl& response_headers, + Http::TestResponseTrailerMapImpl* response_trailers, + bool client_can_accept_sxg) { + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false)); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, false)); + 
Buffer::OwnedImpl data("hi!\n"); + + auto on_modify_encoding_buffer = [&data](std::function cb) { + cb(data); + }; + EXPECT_CALL(encoder_callbacks_, modifyEncodingBuffer) + .WillRepeatedly(Invoke(on_modify_encoding_buffer)); + + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(data, true)); + if (response_trailers) { + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(*response_trailers)); + } + + EXPECT_EQ(response_headers.get(Http::LowerCaseString("content-type")).size(), 1); + EXPECT_EQ( + response_headers.get(Http::LowerCaseString("content-type"))[0]->value().getStringView(), + "text/html"); + EXPECT_EQ("hi!\n", data.toString()); + + const Envoy::Http::LowerCaseString x_client_can_accept_sxg_key("x-client-can-accept-sxg"); + if (client_can_accept_sxg) { + EXPECT_FALSE(request_headers.get(x_client_can_accept_sxg_key).empty()); + EXPECT_EQ("true", + request_headers.get(x_client_can_accept_sxg_key)[0]->value().getStringView()); + EXPECT_EQ(1UL, scope_.counter("sxg.total_client_can_accept_sxg").value()); + } else { + const Envoy::Http::LowerCaseString x_client_can_accept_sxg_key("x-client-can-accept-sxg"); + EXPECT_TRUE(request_headers.get(x_client_can_accept_sxg_key).empty()); + EXPECT_EQ(0UL, scope_.counter("sxg.total_client_can_accept_sxg").value()); + } + EXPECT_EQ(0UL, scope_.counter("sxg.total_should_sign").value()); + EXPECT_EQ(0UL, scope_.counter("sxg.total_exceeded_max_payload_size").value()); + EXPECT_EQ(0UL, scope_.counter("sxg.total_signed_attempts").value()); + EXPECT_EQ(0UL, scope_.counter("sxg.total_signed_succeeded").value()); + EXPECT_EQ(0UL, scope_.counter("sxg.total_signed_failed").value()); + } + + void testFallbackToHtml(Http::TestRequestHeaderMapImpl& request_headers, + Http::TestResponseHeaderMapImpl& response_headers, + bool exceeded_max_payload_size, bool attempted_encode) { + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false)); + 
EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->encodeHeaders(response_headers, false)); + Buffer::OwnedImpl data("hi!\n"); + + auto on_modify_encoding_buffer = [&data](std::function cb) { + cb(data); + }; + EXPECT_CALL(encoder_callbacks_, modifyEncodingBuffer) + .WillRepeatedly(Invoke(on_modify_encoding_buffer)); + + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(data, true)); + EXPECT_EQ(response_headers.get(Http::LowerCaseString("content-type")).size(), 1); + EXPECT_EQ( + response_headers.get(Http::LowerCaseString("content-type"))[0]->value().getStringView(), + "text/html"); + EXPECT_EQ("hi!\n", data.toString()); + + const Envoy::Http::LowerCaseString x_client_can_accept_sxg_key("x-client-can-accept-sxg"); + EXPECT_FALSE(request_headers.get(x_client_can_accept_sxg_key).empty()); + EXPECT_EQ("true", request_headers.get(x_client_can_accept_sxg_key)[0]->value().getStringView()); + EXPECT_EQ(1UL, scope_.counter("sxg.total_client_can_accept_sxg").value()); + EXPECT_EQ(1UL, scope_.counter("sxg.total_should_sign").value()); + EXPECT_EQ(exceeded_max_payload_size ? 1UL : 0UL, + scope_.counter("sxg.total_exceeded_max_payload_size").value()); + EXPECT_EQ(attempted_encode ? 1UL : 0L, scope_.counter("sxg.total_signed_attempts").value()); + EXPECT_EQ(0UL, scope_.counter("sxg.total_signed_succeeded").value()); + EXPECT_EQ(attempted_encode ? 
1UL : 0UL, scope_.counter("sxg.total_signed_failed").value()); + } + + void testEncodeSignedExchange(Http::TestRequestHeaderMapImpl& request_headers, + Http::TestResponseHeaderMapImpl& response_headers) { + testEncodeSignedExchange(request_headers, response_headers, nullptr); + } + + void testEncodeSignedExchange(Http::TestRequestHeaderMapImpl& request_headers, + Http::TestResponseHeaderMapImpl& response_headers, + Http::TestResponseTrailerMapImpl* response_trailers) { + const Buffer::OwnedImpl sxg( + "sxg1-b3\0\0\x1Ehttps://example.org/hello.html\0\x1\0\0\0\x84" + "label;cert-sha256=*unJ3rwJT2DwWlJAw1lfVLvPjeYoJh0+QUQ97zJQPZtc=*;cert-url=\"https://" + "example.org/.sxg/" + "cert.cbor?d=ba7277af0253d83c\";date=1610416640;expires=1611021440;integrity=\"digest/" + "mi-sha256-03\";sig=**;validity-url=\"https://example.org/.sxg/" + "validity.msg\"\xA4" + "FdigestX9mi-sha256-03=0x0E2wkWVYOJ7Gq8+Kfaiyjo3gYCyaijhGGgkzjPoTo=G:statusC200Lcontent-" + "typeItext/htmlPcontent-encodingLmi-sha256-03\0\0\0\0\0\0\x10\0hi!\n", + 472); + testEncodeSignedExchange(request_headers, response_headers, response_trailers, sxg); + } + + void testEncodeSignedExchange(Http::TestRequestHeaderMapImpl& request_headers, + Http::TestResponseHeaderMapImpl& response_headers, + const Buffer::OwnedImpl& sxg) { + testEncodeSignedExchange(request_headers, response_headers, nullptr, sxg); + } + + void testEncodeSignedExchange(Http::TestRequestHeaderMapImpl& request_headers, + Http::TestResponseHeaderMapImpl& response_headers, + Http::TestResponseTrailerMapImpl* response_trailers, + const Buffer::OwnedImpl& sxg) { + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false)); + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->encodeHeaders(response_headers, false)); + + Buffer::OwnedImpl accumulated_data; + + EXPECT_CALL(encoder_callbacks_, addEncodedData(_, false)) + .Times(2) + .WillRepeatedly(Invoke( + [&accumulated_data](Buffer::Instance& data, bool) { 
accumulated_data.add(data); })); + + auto on_modify_encoding_buffer = + [&accumulated_data](std::function cb) { cb(accumulated_data); }; + EXPECT_CALL(encoder_callbacks_, modifyEncodingBuffer) + .WillRepeatedly(Invoke(on_modify_encoding_buffer)); + + Buffer::OwnedImpl chunk1("hi!", 15); + Buffer::OwnedImpl chunk2("\n", 15); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->encodeData(chunk1, false)); + if (response_trailers) { + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->encodeData(chunk2, false)); + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(*response_trailers)); + } else { + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(chunk2, true)); + } + + std::string result = accumulated_data.toString(); + EXPECT_TRUE(clearSignature(result)); + EXPECT_EQ(response_headers.get(Http::LowerCaseString("content-type")).size(), 1); + EXPECT_EQ( + response_headers.get(Http::LowerCaseString("content-type"))[0]->value().getStringView(), + "application/signed-exchange;v=b3"); + EXPECT_EQ(response_headers.get(Http::LowerCaseString("content-length")).size(), 1); + EXPECT_EQ( + response_headers.get(Http::LowerCaseString("content-length"))[0]->value().getStringView(), + std::to_string(accumulated_data.length())); + EXPECT_EQ(sxg.toString(), result); + + const Envoy::Http::LowerCaseString x_client_can_accept_sxg_key("x-client-can-accept-sxg"); + EXPECT_FALSE(request_headers.get(x_client_can_accept_sxg_key).empty()); + EXPECT_EQ("true", request_headers.get(x_client_can_accept_sxg_key)[0]->value().getStringView()); + EXPECT_EQ(1UL, scope_.counter("sxg.total_client_can_accept_sxg").value()); + EXPECT_EQ(1UL, scope_.counter("sxg.total_should_sign").value()); + EXPECT_EQ(0UL, scope_.counter("sxg.total_exceeded_max_payload_size").value()); + EXPECT_EQ(1UL, scope_.counter("sxg.total_signed_attempts").value()); + EXPECT_EQ(1UL, scope_.counter("sxg.total_signed_succeeded").value()); + EXPECT_EQ(0UL, 
scope_.counter("sxg.total_signed_failed").value()); + } + + void callDoSxgAgain() { filter_->doSxg(); } + + Stats::TestUtil::TestStore scope_; + Event::SimulatedTimeSystem time_system_; + std::shared_ptr config_; + std::unique_ptr encoder_; + NiceMock decoder_callbacks_; + NiceMock encoder_callbacks_; + std::shared_ptr filter_; +}; + +// Verifies that the OAuth SDSSecretReader correctly updates dynamic generic secret. +TEST_F(FilterTest, SdsDynamicGenericSecret) { + NiceMock config_tracker; + Secret::SecretManagerImpl secret_manager{config_tracker}; + envoy::config::core::v3::ConfigSource config_source; + + NiceMock secret_context; + NiceMock local_info; + Api::ApiPtr api = Api::createApiForTest(); + Stats::IsolatedStoreImpl stats; + NiceMock init_manager; + Init::TargetHandlePtr init_handle; + NiceMock dispatcher; + EXPECT_CALL(secret_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); + EXPECT_CALL(secret_context, api()).WillRepeatedly(ReturnRef(*api)); + EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); + EXPECT_CALL(secret_context, stats()).WillRepeatedly(ReturnRef(stats)); + EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); + EXPECT_CALL(init_manager, add(_)) + .WillRepeatedly(Invoke([&init_handle](const Init::Target& target) { + init_handle = target.createHandle("test"); + })); + + auto certificate_secret_provider = secret_manager.findOrCreateGenericSecretProvider( + config_source, "certificate", secret_context); + auto certificate_callback = secret_context.cluster_manager_.subscription_factory_.callbacks_; + auto private_key_secret_provider = secret_manager.findOrCreateGenericSecretProvider( + config_source, "private_key", secret_context); + auto private_key_callback = secret_context.cluster_manager_.subscription_factory_.callbacks_; + + SDSSecretReader secret_reader(certificate_secret_provider, private_key_secret_provider, *api); + EXPECT_TRUE(secret_reader.certificate().empty()); + 
EXPECT_TRUE(secret_reader.privateKey().empty()); + + const std::string yaml_client = R"YAML( +name: certificate +generic_secret: + secret: + inline_string: "certificate_test" +)YAML"; + + envoy::extensions::transport_sockets::tls::v3::Secret typed_secret; + TestUtility::loadFromYaml(yaml_client, typed_secret); + const auto decoded_resources_client = TestUtility::decodeResources({typed_secret}); + + certificate_callback->onConfigUpdate(decoded_resources_client.refvec_, ""); + EXPECT_EQ(secret_reader.certificate(), "certificate_test"); + EXPECT_EQ(secret_reader.privateKey(), ""); + + const std::string yaml_token = R"YAML( +name: private_key +generic_secret: + secret: + inline_string: "private_key_test" +)YAML"; + TestUtility::loadFromYaml(yaml_token, typed_secret); + const auto decoded_resources_token = TestUtility::decodeResources({typed_secret}); + + private_key_callback->onConfigUpdate(decoded_resources_token.refvec_, ""); + EXPECT_EQ(secret_reader.certificate(), "certificate_test"); + EXPECT_EQ(secret_reader.privateKey(), "private_key_test"); + + const std::string yaml_client_recheck = R"EOF( +name: certificate +generic_secret: + secret: + inline_string: "certificate_test_recheck" +)EOF"; + TestUtility::loadFromYaml(yaml_client_recheck, typed_secret); + const auto decoded_resources_client_recheck = TestUtility::decodeResources({typed_secret}); + + certificate_callback->onConfigUpdate(decoded_resources_client_recheck.refvec_, ""); + EXPECT_EQ(secret_reader.certificate(), "certificate_test_recheck"); + EXPECT_EQ(secret_reader.privateKey(), "private_key_test"); +} + +TEST_F(FilterTest, NoHostHeader) { + setConfiguration(); + setFilter(); + + Http::TestRequestHeaderMapImpl request_headers{{"accept", "application/signed-exchange;v=b3"}, + {":path", "/hello.html"}}; + Http::TestResponseHeaderMapImpl response_headers{ + {"content-type", "text/html"}, {":status", "200"}, {"x-should-encode-sxg", "true"}}; + testPassthroughHtml(request_headers, response_headers, false); +} 
+ +TEST_F(FilterTest, AcceptTextHtml) { + setConfiguration(); + setFilter(); + + Http::TestRequestHeaderMapImpl request_headers{ + {"accept", "text/html"}, {"host", "example.org"}, {":path", "/hello.html"}}; + Http::TestResponseHeaderMapImpl response_headers{ + {"content-type", "text/html"}, {":status", "200"}, {"x-should-encode-sxg", "true"}}; + testPassthroughHtml(request_headers, response_headers, false); +} + +TEST_F(FilterTest, HtmlWithTrailers) { + setConfiguration(); + setFilter(); + + Http::TestRequestHeaderMapImpl request_headers{ + {"accept", "text/html"}, {"host", "example.org"}, {":path", "/hello.html"}}; + Http::TestResponseHeaderMapImpl response_headers{{"content-type", "text/html"}, + {":status", "200"}}; + Http::TestResponseTrailerMapImpl response_trailers{{"x-test-sample-trailer", "wait for me!"}}; + testPassthroughHtml(request_headers, response_headers, &response_trailers, false); +} + +TEST_F(FilterTest, NoPathHeader) { + setConfiguration(); + setFilter(); + + Http::TestRequestHeaderMapImpl request_headers{{"accept", "application/signed-exchange;v=b3"}, + {"host", "example.org"}}; + Http::TestResponseHeaderMapImpl response_headers{ + {"content-type", "text/html"}, {":status", "200"}, {"x-should-encode-sxg", "true"}}; + testPassthroughHtml(request_headers, response_headers, false); +} + +TEST_F(FilterTest, NoAcceptHeader) { + setConfiguration(); + setFilter(); + + Http::TestRequestHeaderMapImpl request_headers{{"host", "example.org"}, {":path", "/hello.html"}}; + Http::TestResponseHeaderMapImpl response_headers{ + {"content-type", "text/html"}, {":status", "200"}, {"x-should-encode-sxg", "true"}}; + testPassthroughHtml(request_headers, response_headers, false); +} + +TEST_F(FilterTest, NoStatusHeader) { + setConfiguration(); + setFilter(); + + Http::TestRequestHeaderMapImpl request_headers{{"accept", "application/signed-exchange;v=b3"}, + {"host", "example.org"}, + {":path", "/hello.html"}}; + Http::TestResponseHeaderMapImpl 
response_headers{{"content-type", "text/html"}, + {"x-should-encode-sxg", "true"}}; + testPassthroughHtml(request_headers, response_headers, true); +} + +TEST_F(FilterTest, Status404) { + setConfiguration(); + setFilter(); + + Http::TestRequestHeaderMapImpl request_headers{{"accept", "application/signed-exchange;v=b3"}, + {"host", "example.org"}, + {":path", "/hello.html"}}; + Http::TestResponseHeaderMapImpl response_headers{ + {"content-type", "text/html"}, {":status", "404"}, {"x-should-encode-sxg", "true"}}; + testPassthroughHtml(request_headers, response_headers, true); +} + +TEST_F(FilterTest, XShouldEncodeNotSet) { + setConfiguration(); + setFilter(); + + Http::TestRequestHeaderMapImpl request_headers{ + {"host", "example.org"}, + {"accept", "application/signed-exchange;v=b3;q=0.9,text/html;q=0.8"}, + {":path", "/hello.html"}}; + Http::TestResponseHeaderMapImpl response_headers{{"content-type", "text/html"}, + {":status", "200"}}; + testPassthroughHtml(request_headers, response_headers, true); +} + +TEST_F(FilterTest, AcceptTextHtmlWithQ) { + setConfiguration(); + setFilter(); + + Http::TestRequestHeaderMapImpl request_headers{{"accept", "text/html;q=0.8"}, + {":protocol", "https"}, + {":host", "example.org"}, + {":path", "/hello.html"}}; + Http::TestResponseHeaderMapImpl response_headers{ + {"content-type", "text/html"}, {":status", "200"}, {"x-should-encode-sxg", "true"}}; + testPassthroughHtml(request_headers, response_headers, false); +} + +TEST_F(FilterTest, AcceptApplicationSignedExchangeNoVersion) { + setConfiguration(); + setFilter(); + + Http::TestRequestHeaderMapImpl request_headers{ + {"accept", "application/signed-exchange"}, {"host", "example.org"}, {":path", "/hello.html"}}; + Http::TestResponseHeaderMapImpl response_headers{ + {"content-type", "text/html"}, {":status", "200"}, {"x-should-encode-sxg", "true"}}; + testPassthroughHtml(request_headers, response_headers, false); +} + +TEST_F(FilterTest, AcceptApplicationSignedExchangeWithVersionB2) 
{ + setConfiguration(); + setFilter(); + + Http::TestRequestHeaderMapImpl request_headers{{"accept", "application/signed-exchange;v=b2"}, + {"host", "example.org"}, + {":path", "/hello.html"}}; + Http::TestResponseHeaderMapImpl response_headers{ + {"content-type", "text/html"}, {":status", "200"}, {"x-should-encode-sxg", "true"}}; + testPassthroughHtml(request_headers, response_headers, false); +} + +TEST_F(FilterTest, AcceptApplicationSignedExchangeWithVersionB3) { + setConfiguration(); + setFilter(); + + Http::TestRequestHeaderMapImpl request_headers{{"accept", "application/signed-exchange;v=b3"}, + {"host", "example.org"}, + {":path", "/hello.html"}}; + Http::TestResponseHeaderMapImpl response_headers{ + {"content-type", "text/html"}, {":status", "200"}, {"x-should-encode-sxg", "true"}}; + testEncodeSignedExchange(request_headers, response_headers); +} + +TEST_F(FilterTest, AcceptApplicationSignedExchangeWithVersionB3WithQ) { + setConfiguration(); + setFilter(); + + Http::TestRequestHeaderMapImpl request_headers{ + {"accept", "application/signed-exchange;v=b3;q=0.9"}, + {"host", "example.org"}, + {":path", "/hello.html"}}; + Http::TestResponseHeaderMapImpl response_headers{ + {"content-type", "text/html"}, {":status", "200"}, {"x-should-encode-sxg", "true"}}; + testEncodeSignedExchange(request_headers, response_headers); +} + +TEST_F(FilterTest, AcceptMultipleTextHtml) { + setConfiguration(); + setFilter(); + + Http::TestRequestHeaderMapImpl request_headers{ + {"host", "example.org"}, + {"accept", "application/signed-exchange;v=b3;q=0.8,text/html;q=0.9"}, + {":path", "/hello.html"}}; + Http::TestResponseHeaderMapImpl response_headers{ + {"content-type", "text/html"}, {":status", "200"}, {"x-should-encode-sxg", "true"}}; + testPassthroughHtml(request_headers, response_headers, false); +} + +TEST_F(FilterTest, AcceptMultipleSignedExchange) { + setConfiguration(); + setFilter(); + + Http::TestRequestHeaderMapImpl request_headers{ + {"host", "example.org"}, + 
{"accept", "application/signed-exchange;v=b3;q=0.9,text/html;q=0.8"}, + {":path", "/hello.html"}}; + Http::TestResponseHeaderMapImpl response_headers{ + {"content-type", "text/html"}, {":status", "200"}, {"x-should-encode-sxg", "true"}}; + testEncodeSignedExchange(request_headers, response_headers); +} + +TEST_F(FilterTest, ResponseExceedsMaxPayloadSize) { + setConfiguration(); + setFilter(); + + Http::TestRequestHeaderMapImpl request_headers{ + {"host", "example.org"}, + {"accept", "application/signed-exchange;v=b3;q=0.9,text/html;q=0.8"}, + {":path", "/hello.html"}}; + Http::TestResponseHeaderMapImpl response_headers{ + {"content-type", "text/html"}, {":status", "200"}, {"x-should-encode-sxg", "true"}}; + EXPECT_CALL(encoder_callbacks_, encoderBufferLimit).WillRepeatedly(Return(10)); + testFallbackToHtml(request_headers, response_headers, true, false); +} + +TEST_F(FilterTest, ResponseExceedsMaxPayloadSizeEncodeFail) { + setConfiguration(); + setFilter(); + + Http::TestRequestHeaderMapImpl request_headers{ + {"host", "example.org"}, + {"accept", "application/signed-exchange;v=b3;q=0.9,text/html;q=0.8"}, + {":path", "/hello.html"}}; + Http::TestResponseHeaderMapImpl response_headers{ + {"content-type", "text/html"}, {":status", "200"}, {"x-should-encode-sxg", "true"}}; + EXPECT_CALL(encoder_callbacks_, encoderBufferLimit) + .WillOnce(Return(100000)) + .WillRepeatedly(Return(10)); + testFallbackToHtml(request_headers, response_headers, true, true); +} + +TEST_F(FilterTest, UrlWithQueryParam) { + setConfiguration(); + setFilter(); + + Http::TestRequestHeaderMapImpl request_headers{ + {"accept", "application/signed-exchange;v=b3;q=0.9"}, + {"host", "example.org"}, + {":path", "/hello.html?good=bye"}}; + Http::TestResponseHeaderMapImpl response_headers{ + {"content-type", "text/html"}, {":status", "200"}, {"x-should-encode-sxg", "true"}}; + const Buffer::OwnedImpl expected_sxg( + "sxg1-b3\0\0\x27https://example.org/hello.html?good=bye\0\x1\0\0\0\x84" + 
"label;cert-sha256=*unJ3rwJT2DwWlJAw1lfVLvPjeYoJh0+QUQ97zJQPZtc=*;cert-url=\"https://" + "example.org/.sxg/" + "cert.cbor?d=ba7277af0253d83c\";date=1610416640;expires=1611021440;integrity=\"digest/" + "mi-sha256-03\";sig=**;validity-url=\"https://example.org/.sxg/" + "validity.msg\"\xA4" + "FdigestX9mi-sha256-03=0x0E2wkWVYOJ7Gq8+Kfaiyjo3gYCyaijhGGgkzjPoTo=G:statusC200Lcontent-" + "typeItext/htmlPcontent-encodingLmi-sha256-03\0\0\0\0\0\0\x10\0hi!\n", + 481); + testEncodeSignedExchange(request_headers, response_headers, expected_sxg); +} + +TEST_F(FilterTest, CborValdityFullUrls) { + setConfiguration({R"YAML( +cbor_url: "https://amp.example.org/cert.cbor" +validity_url: "https://amp.example.org/validity.msg" +)YAML"}); + setFilter(); + + Http::TestRequestHeaderMapImpl request_headers{{"accept", "application/signed-exchange;v=b3"}, + {"host", "example.org"}, + {":path", "/hello.html"}}; + Http::TestResponseHeaderMapImpl response_headers{ + {"content-type", "text/html"}, {":status", "200"}, {"x-should-encode-sxg", "true"}}; + const Buffer::OwnedImpl expected_sxg( + "sxg1-b3\0\0\x1Ehttps://example.org/hello.html\0\0\xFE\0\0\x84" + "label;cert-sha256=*unJ3rwJT2DwWlJAw1lfVLvPjeYoJh0+QUQ97zJQPZtc=*;cert-url=\"https://" + "amp.example.org/" + "cert.cbor?d=ba7277af0253d83c\";date=1610416640;expires=1611021440;integrity=\"digest/" + "mi-sha256-03\";sig=**;validity-url=\"https://amp.example.org/" + "validity.msg\"\xA4" + "FdigestX9mi-sha256-03=0x0E2wkWVYOJ7Gq8+Kfaiyjo3gYCyaijhGGgkzjPoTo=G:statusC200Lcontent-" + "typeItext/htmlPcontent-encodingLmi-sha256-03\0\0\0\0\0\0\x10\0hi!\n", + 470); + testEncodeSignedExchange(request_headers, response_headers, expected_sxg); +} + +TEST_F(FilterTest, WithHttpTrailers) { + setConfiguration(); + setFilter(); + + Http::TestRequestHeaderMapImpl request_headers{ + {"accept", "application/signed-exchange;v=b3;q=0.9"}, + {"host", "example.org"}, + {":path", "/hello.html"}}; + Http::TestResponseHeaderMapImpl response_headers{ + {"content-type", 
"text/html"}, {":status", "200"}, {"x-should-encode-sxg", "true"}}; + Http::TestResponseTrailerMapImpl response_trailers{{"x-test-sample-trailer", "wait for me!"}}; + testEncodeSignedExchange(request_headers, response_headers, &response_trailers); +} + +TEST_F(FilterTest, WithCustomShouldEncodeHeader) { + setConfiguration({R"YAML( +cbor_url: "/.sxg/cert.cbor" +validity_url: "/.sxg/validity.msg" +should_encode_sxg_header: "x-custom-should-encode-sxg" +)YAML"}); + setFilter(); + + Http::TestRequestHeaderMapImpl request_headers{ + {"accept", "application/signed-exchange;v=b3;q=0.9"}, + {"host", "example.org"}, + {":path", "/hello.html"}}; + Http::TestResponseHeaderMapImpl response_headers{ + {"content-type", "text/html"}, {":status", "200"}, {"x-custom-should-encode-sxg", "true"}}; + testEncodeSignedExchange(request_headers, response_headers); +} + +TEST_F(FilterTest, FilterXEnvoyHeaders) { + setConfiguration(); + setFilter(); + + Http::TestRequestHeaderMapImpl request_headers{{"accept", "application/signed-exchange;v=b3"}, + {"host", "example.org"}, + {":path", "/hello.html"}}; + Http::TestResponseHeaderMapImpl response_headers{{"content-type", "text/html"}, + {":status", "200"}, + {"x-should-encode-sxg", "true"}, + {"x-envoy-something", "something"}}; + testEncodeSignedExchange(request_headers, response_headers); +} + +TEST_F(FilterTest, FilterCustomHeaders) { + setConfiguration({R"YAML( +cbor_url: "/.sxg/cert.cbor" +validity_url: "/.sxg/validity.msg" +header_prefix_filters: + - "x-foo-" + - "x-bar-" +)YAML"}); + setFilter(); + + Http::TestRequestHeaderMapImpl request_headers{{"accept", "application/signed-exchange;v=b3"}, + {"host", "example.org"}, + {":path", "/hello.html"}}; + Http::TestResponseHeaderMapImpl response_headers{{"content-type", "text/html"}, + {":status", "200"}, + {"x-should-encode-sxg", "true"}, + {"x-foo-bar", "foo"}, + {"x-bar-baz", "bar"}}; + testEncodeSignedExchange(request_headers, response_headers); + const Envoy::Http::LowerCaseString 
x_client_can_accept_sxg_key("x-client-can-accept-sxg"); +} + +TEST_F(FilterTest, CustomHeader) { + setConfiguration(); + setFilter(); + + Http::TestRequestHeaderMapImpl request_headers{{"accept", "application/signed-exchange;v=b3"}, + {"host", "example.org"}, + {":path", "/hello.html"}}; + Http::TestResponseHeaderMapImpl response_headers{{"content-type", "text/html"}, + {":status", "200"}, + {"x-should-encode-sxg", "true"}, + {"x-special-header", "very special"}}; + const Buffer::OwnedImpl expected_sxg( + "sxg1-b3\0\0\x1Ehttps://example.org/hello.html\0\x1\0\0\0\xA2" + "label;cert-sha256=*unJ3rwJT2DwWlJAw1lfVLvPjeYoJh0+QUQ97zJQPZtc=*;cert-url=\"https://" + "example.org/.sxg/" + "cert.cbor?d=ba7277af0253d83c\";date=1610416640;expires=1611021440;integrity=\"digest/" + "mi-sha256-03\";sig=**;validity-url=\"https://example.org/.sxg/" + "validity.msg\"\xA5" + "FdigestX9mi-sha256-03=0x0E2wkWVYOJ7Gq8+Kfaiyjo3gYCyaijhGGgkzjPoTo=G:statusC200Lcontent-" + "typeItext/htmlPcontent-encodingLmi-sha256-03Px-special-headerLvery special" + "\0\0\0\0\0\0\x10\0hi!\n", + 502); + testEncodeSignedExchange(request_headers, response_headers, expected_sxg); +} + +TEST_F(FilterTest, ExtraHeaders) { + setConfiguration(); + setFilter(); + + Http::TestRequestHeaderMapImpl request_headers{{"accept", "application/signed-exchange;v=b3"}, + {"host", "example.org"}, + {":path", "/hello.html"}}; + Http::TestResponseHeaderMapImpl response_headers{{"content-type", "text/html"}, + {":status", "200"}, + {"x-should-encode-sxg", "true"}, + {"x-special-header", "twice"}, + {"x-special-header", "as special"}}; + const Buffer::OwnedImpl expected_sxg( + "sxg1-b3\0\0\x1Ehttps://example.org/hello.html\0\x1\x0\0\0\xA6" + "label;cert-sha256=*unJ3rwJT2DwWlJAw1lfVLvPjeYoJh0+QUQ97zJQPZtc=*;cert-url=\"https://" + "example.org/.sxg/" + "cert.cbor?d=ba7277af0253d83c\";date=1610416640;expires=1611021440;integrity=\"digest/" + "mi-sha256-03\";sig=**;validity-url=\"https://example.org/.sxg/" + "validity.msg\"\xA5" + 
"FdigestX9mi-sha256-03=0x0E2wkWVYOJ7Gq8+Kfaiyjo3gYCyaijhGGgkzjPoTo=G:statusC200Lcontent-" + "typeItext/htmlPcontent-encodingLmi-sha256-03Px-special-headerP" + "twice,as special" + "\0\0\0\0\0\0\x10\0hi!\n", + 506); + + testEncodeSignedExchange(request_headers, response_headers, expected_sxg); +} + +TEST_F(FilterTest, TestDoubleDoSxg) { + setConfiguration(); + setFilter(); + + Http::TestRequestHeaderMapImpl request_headers{{"accept", "application/signed-exchange;v=b3"}, + {"host", "example.org"}, + {":path", "/hello.html"}}; + Http::TestResponseHeaderMapImpl response_headers{ + {"content-type", "text/html"}, {":status", "200"}, {"x-should-encode-sxg", "true"}}; + testEncodeSignedExchange(request_headers, response_headers); + callDoSxgAgain(); +} + +TEST_F(FilterTest, LoadHeadersFailure) { + setConfiguration(); + encoder_ = std::make_unique(); + setFilter(); + EXPECT_CALL(*static_cast(encoder_.get()), setOrigin); + EXPECT_CALL(*static_cast(encoder_.get()), setUrl); + EXPECT_CALL(*static_cast(encoder_.get()), loadHeaders).WillOnce(Return(false)); + + Http::TestRequestHeaderMapImpl request_headers{ + {"host", "example.org"}, + {"accept", "application/signed-exchange;v=b3;q=0.9,text/html;q=0.8"}, + {":path", "/hello.html"}}; + Http::TestResponseHeaderMapImpl response_headers{ + {"content-type", "text/html"}, {":status", "200"}, {"x-should-encode-sxg", "true"}}; + testFallbackToHtml(request_headers, response_headers, false, true); +} + +TEST_F(FilterTest, LoadContentFailure) { + setConfiguration(); + encoder_ = std::make_unique(); + setFilter(); + EXPECT_CALL(*static_cast(encoder_.get()), setOrigin); + EXPECT_CALL(*static_cast(encoder_.get()), setUrl); + EXPECT_CALL(*static_cast(encoder_.get()), loadHeaders).WillOnce(Return(true)); + EXPECT_CALL(*static_cast(encoder_.get()), loadContent).WillOnce(Return(false)); + + Http::TestRequestHeaderMapImpl request_headers{ + {"host", "example.org"}, + {"accept", "application/signed-exchange;v=b3;q=0.9,text/html;q=0.8"}, + 
{":path", "/hello.html"}}; + Http::TestResponseHeaderMapImpl response_headers{ + {"content-type", "text/html"}, {":status", "200"}, {"x-should-encode-sxg", "true"}}; + testFallbackToHtml(request_headers, response_headers, false, true); +} + +TEST_F(FilterTest, GetEncodedResponseFailure) { + setConfiguration(); + encoder_ = std::make_unique(); + setFilter(); + EXPECT_CALL(*static_cast(encoder_.get()), setOrigin); + EXPECT_CALL(*static_cast(encoder_.get()), setUrl); + EXPECT_CALL(*static_cast(encoder_.get()), loadHeaders).WillOnce(Return(true)); + EXPECT_CALL(*static_cast(encoder_.get()), loadContent).WillOnce(Return(true)); + EXPECT_CALL(*static_cast(encoder_.get()), getEncodedResponse) + .WillOnce(Return(false)); + + Http::TestRequestHeaderMapImpl request_headers{ + {"host", "example.org"}, + {"accept", "application/signed-exchange;v=b3;q=0.9,text/html;q=0.8"}, + {":path", "/hello.html"}}; + Http::TestResponseHeaderMapImpl response_headers{ + {"content-type", "text/html"}, {":status", "200"}, {"x-should-encode-sxg", "true"}}; + testFallbackToHtml(request_headers, response_headers, false, true); +} + +TEST_F(FilterTest, LoadSignerFailure) { + setConfiguration(); + encoder_ = std::make_unique(); + setFilter(); + EXPECT_CALL(*static_cast(encoder_.get()), setOrigin); + EXPECT_CALL(*static_cast(encoder_.get()), setUrl); + EXPECT_CALL(*static_cast(encoder_.get()), loadHeaders).WillOnce(Return(true)); + EXPECT_CALL(*static_cast(encoder_.get()), loadContent).WillOnce(Return(true)); + EXPECT_CALL(*static_cast(encoder_.get()), getEncodedResponse) + .WillOnce(Return(true)); + EXPECT_CALL(*static_cast(encoder_.get()), loadSigner).WillOnce(Return(false)); + + Http::TestRequestHeaderMapImpl request_headers{ + {"host", "example.org"}, + {"accept", "application/signed-exchange;v=b3;q=0.9,text/html;q=0.8"}, + {":path", "/hello.html"}}; + Http::TestResponseHeaderMapImpl response_headers{ + {"content-type", "text/html"}, {":status", "200"}, {"x-should-encode-sxg", "true"}}; + 
testFallbackToHtml(request_headers, response_headers, false, true); +} + +TEST_F(FilterTest, WriteSxgFailure) { + setConfiguration(); + encoder_ = std::make_unique(); + setFilter(); + EXPECT_CALL(*static_cast(encoder_.get()), setOrigin); + EXPECT_CALL(*static_cast(encoder_.get()), setUrl); + EXPECT_CALL(*static_cast(encoder_.get()), loadHeaders).WillOnce(Return(true)); + EXPECT_CALL(*static_cast(encoder_.get()), loadContent).WillOnce(Return(true)); + EXPECT_CALL(*static_cast(encoder_.get()), getEncodedResponse) + .WillOnce(Return(true)); + EXPECT_CALL(*static_cast(encoder_.get()), loadSigner).WillOnce(Return(true)); + EXPECT_CALL(*static_cast(encoder_.get()), writeSxg).WillOnce(Return(nullptr)); + + Http::TestRequestHeaderMapImpl request_headers{ + {"host", "example.org"}, + {"accept", "application/signed-exchange;v=b3;q=0.9,text/html;q=0.8"}, + {":path", "/hello.html"}}; + Http::TestResponseHeaderMapImpl response_headers{ + {"content-type", "text/html"}, {":status", "200"}, {"x-should-encode-sxg", "true"}}; + testFallbackToHtml(request_headers, response_headers, false, true); +} + +// MyCombinedCertKeyId +TEST_F(FilterTest, CombiedCertificateId) { + const std::string certificate(R"PEM( +-----BEGIN CERTIFICATE----- +MIIBhjCCASygAwIBAgIJAIH9REPqIFXTMAkGByqGSM49BAEwMjEUMBIGA1UEAwwL +ZXhhbXBsZS5vcmcxDTALBgNVBAoMBFRlc3QxCzAJBgNVBAYTAlVTMB4XDTIxMDEx +MzAxMDcwMVoXDTIxMDQxMzAxMDcwMVowMjEUMBIGA1UEAwwLZXhhbXBsZS5vcmcx +DTALBgNVBAoMBFRlc3QxCzAJBgNVBAYTAlVTMFkwEwYHKoZIzj0CAQYIKoZIzj0D +AQcDQgAE4ZrHsGLEiP+pV70a8zIERNcu9MBJHHfbeqLUqwGWWU2/YHObf58nE9to +c6lgrko2JdbV6TyWLVUc/M0Pn+OVSaMsMCowEAYKKwYBBAHWeQIBFgQCBQAwFgYD +VR0RBA8wDYILZXhhbXBsZS5vcmcwCQYHKoZIzj0EAQNJADBGAiEAuQJjX+z7j4hR +xtxfs4VPY5RsF5Sawd+mtluRxpoURcsCIQCIGU/11jcuS0UbIpt4B5Gb1UJlSKGi +Dgu+2OKt7qVPrA== +-----END CERTIFICATE----- +-----BEGIN EC PARAMETERS----- +BggqhkjOPQMBBw== +-----END EC PARAMETERS----- +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIJyGXecxIQtBwBJWU4Sc5A8UHNt5HnOBR9Oh11AGYa/2oAoGCCqGSM49 
+AwEHoUQDQgAE4ZrHsGLEiP+pV70a8zIERNcu9MBJHHfbeqLUqwGWWU2/YHObf58n +E9toc6lgrko2JdbV6TyWLVUc/M0Pn+OVSQ== +-----END EC PRIVATE KEY----- +)PEM"); + + setConfiguration({R"YAML( +cbor_url: "/.sxg/cert.cbor" +validity_url: "/.sxg/validity.msg" +)YAML"}, + certificate, certificate); + setFilter(); + + Http::TestRequestHeaderMapImpl request_headers{{"accept", "application/signed-exchange;v=b3"}, + {"host", "example.org"}, + {":path", "/hello.html"}}; + Http::TestResponseHeaderMapImpl response_headers{ + {"content-type", "text/html"}, {":status", "200"}, {"x-should-encode-sxg", "true"}}; + testEncodeSignedExchange(request_headers, response_headers); +} + +TEST_F(FilterTest, BadCertificateId) { + const std::string certificate(""); + const std::string private_key(R"PEM( +-----BEGIN EC PARAMETERS----- +BggqhkjOPQMBBw== +-----END EC PARAMETERS----- +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIJyGXecxIQtBwBJWU4Sc5A8UHNt5HnOBR9Oh11AGYa/2oAoGCCqGSM49 +AwEHoUQDQgAE4ZrHsGLEiP+pV70a8zIERNcu9MBJHHfbeqLUqwGWWU2/YHObf58n +E9toc6lgrko2JdbV6TyWLVUc/M0Pn+OVSQ== +-----END EC PRIVATE KEY----- +)PEM"); + + setConfiguration({R"YAML( +cbor_url: "/.sxg/cert.cbor" +validity_url: "/.sxg/validity.msg" +)YAML"}, + certificate, private_key); + setFilter(); + + Http::TestRequestHeaderMapImpl request_headers{{"host", "example.org"}, + {"accept", "application/signed-exchange;v=b3"}, + {":path", "/hello.html"}}; + Http::TestResponseHeaderMapImpl response_headers{ + {"content-type", "text/html"}, {":status", "200"}, {"x-should-encode-sxg", "true"}}; + + testFallbackToHtml(request_headers, response_headers, false, true); +} +std::string private_key(R"PEM( +-----BEGIN EC PARAMETERS----- +BggqhkjOPQMBBw== +-----END EC PARAMETERS----- +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIJyGXecxIQtBwBJWU4Sc5A8UHNt5HnOBR9Oh11AGYa/2oAoGCCqGSM49 +AwEHoUQDQgAE4ZrHsGLEiP+pV70a8zIERNcu9MBJHHfbeqLUqwGWWU2/YHObf58n +E9toc6lgrko2JdbV6TyWLVUc/M0Pn+OVSQ== +-----END EC PRIVATE KEY----- +)PEM"); + +TEST_F(FilterTest, BadPriKeyId) { + 
const std::string certificate(R"PEM( +-----BEGIN CERTIFICATE----- +MIIBhjCCASygAwIBAgIJAIH9REPqIFXTMAkGByqGSM49BAEwMjEUMBIGA1UEAwwL +ZXhhbXBsZS5vcmcxDTALBgNVBAoMBFRlc3QxCzAJBgNVBAYTAlVTMB4XDTIxMDEx +MzAxMDcwMVoXDTIxMDQxMzAxMDcwMVowMjEUMBIGA1UEAwwLZXhhbXBsZS5vcmcx +DTALBgNVBAoMBFRlc3QxCzAJBgNVBAYTAlVTMFkwEwYHKoZIzj0CAQYIKoZIzj0D +AQcDQgAE4ZrHsGLEiP+pV70a8zIERNcu9MBJHHfbeqLUqwGWWU2/YHObf58nE9to +c6lgrko2JdbV6TyWLVUc/M0Pn+OVSaMsMCowEAYKKwYBBAHWeQIBFgQCBQAwFgYD +VR0RBA8wDYILZXhhbXBsZS5vcmcwCQYHKoZIzj0EAQNJADBGAiEAuQJjX+z7j4hR +xtxfs4VPY5RsF5Sawd+mtluRxpoURcsCIQCIGU/11jcuS0UbIpt4B5Gb1UJlSKGi +Dgu+2OKt7qVPrA== +-----END CERTIFICATE----- +)PEM"); + const std::string private_key(""); + + setConfiguration({R"YAML( +cbor_url: "/.sxg/cert.cbor" +validity_url: "/.sxg/validity.msg" +)YAML"}, + certificate, private_key); + setFilter(); + + Http::TestRequestHeaderMapImpl request_headers{{"host", "example.org"}, + {"accept", "application/signed-exchange;v=b3"}, + {":path", "/hello.html"}}; + Http::TestResponseHeaderMapImpl response_headers{ + {"content-type", "text/html"}, {":status", "200"}, {"x-should-encode-sxg", "true"}}; + + testFallbackToHtml(request_headers, response_headers, false, true); +} + +} // namespace SXG +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/docs/BUILD b/docs/BUILD index e2a0c33e0114f..51f3e2b4a1cd3 100644 --- a/docs/BUILD +++ b/docs/BUILD @@ -115,11 +115,15 @@ pkg_tar( genrule( name = "extensions_security_rst", - srcs = ["//source/extensions:extensions_metadata.yaml"], + srcs = [ + "//source/extensions:extensions_metadata.yaml", + "//contrib:extensions_metadata.yaml", + ], outs = ["extensions_security_rst.tar"], cmd = """ $(location //tools/docs:generate_extensions_security_rst) \\ - $(location //source/extensions:extensions_metadata.yaml) $@ + $(location //source/extensions:extensions_metadata.yaml) \\ + $(location //contrib:extensions_metadata.yaml) $@ """, tools = 
["//tools/docs:generate_extensions_security_rst"], ) diff --git a/docs/conf.py b/docs/conf.py index 962f3ec39ade8..fb1b0afb11400 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -22,7 +22,7 @@ import sphinx_rtd_theme -class SphinxConfigException(Exception): +class SphinxConfigError(Exception): pass @@ -60,7 +60,7 @@ def setup(app): or not os.path.exists(os.environ["ENVOY_DOCS_BUILD_CONFIG"])) if missing_config: - raise SphinxConfigException( + raise SphinxConfigError( "`ENVOY_DOCS_BUILD_CONFIG` env var must be defined, " "and point to a valid yaml file") @@ -70,7 +70,7 @@ def setup(app): def _config(key): if not configs.get(key): - raise SphinxConfigException(f"`{key}` config var must be defined") + raise SphinxConfigError(f"`{key}` config var must be defined") return configs[key] @@ -338,10 +338,21 @@ def _config(key): 'v1.14.7': ('https://www.envoyproxy.io/docs/envoy/v1.14.7', None), 'v1.15.0': ('https://www.envoyproxy.io/docs/envoy/v1.15.0', None), 'v1.15.4': ('https://www.envoyproxy.io/docs/envoy/v1.15.4', None), + 'v1.15.5': ('https://www.envoyproxy.io/docs/envoy/v1.15.5', None), 'v1.16.0': ('https://www.envoyproxy.io/docs/envoy/v1.16.0', None), + 'v1.16.1': ('https://www.envoyproxy.io/docs/envoy/v1.16.1', None), + 'v1.16.2': ('https://www.envoyproxy.io/docs/envoy/v1.16.2', None), 'v1.16.3': ('https://www.envoyproxy.io/docs/envoy/v1.16.3', None), + 'v1.16.4': ('https://www.envoyproxy.io/docs/envoy/v1.16.4', None), + 'v1.16.5': ('https://www.envoyproxy.io/docs/envoy/v1.16.5', None), 'v1.17.0': ('https://www.envoyproxy.io/docs/envoy/v1.17.0', None), 'v1.17.1': ('https://www.envoyproxy.io/docs/envoy/v1.17.1', None), 'v1.17.2': ('https://www.envoyproxy.io/docs/envoy/v1.17.2', None), - 'v1.18.0': ('https://www.envoyproxy.io/docs/envoy/v1.18.2', None) + 'v1.17.3': ('https://www.envoyproxy.io/docs/envoy/v1.17.3', None), + 'v1.17.4': ('https://www.envoyproxy.io/docs/envoy/v1.17.4', None), + 'v1.18.0': ('https://www.envoyproxy.io/docs/envoy/v1.18.2', None), + 'v1.18.3': 
('https://www.envoyproxy.io/docs/envoy/v1.18.3', None), + 'v1.18.4': ('https://www.envoyproxy.io/docs/envoy/v1.18.4', None), + 'v1.19.0': ('https://www.envoyproxy.io/docs/envoy/v1.19.0', None), + 'v1.19.1': ('https://www.envoyproxy.io/docs/envoy/v1.19.1', None), } diff --git a/docs/root/api-docs/xds_protocol.rst b/docs/root/api-docs/xds_protocol.rst index 957b8dc1c48bc..77428cc932ad8 100644 --- a/docs/root/api-docs/xds_protocol.rst +++ b/docs/root/api-docs/xds_protocol.rst @@ -445,12 +445,14 @@ that point, clearing the list of subscribed resources is interpretted as an unsu to "*". For example, in SotW: + - Client sends a request with :ref:`resource_names ` unset. Server interprets this as a subscription to "*". - Client sends a request with :ref:`resource_names ` set to "*" and "A". Server interprets this as continuing the existing subscription to "*" and adding a new subscription to "A". - Client sends a request with :ref:`resource_names ` set to "A". Server interprets this as unsubscribing to "*" and continuing the existing subscription to "A". - Client sends a request with :ref:`resource_names ` unset. Server interprets this as unsubscribing to "A" (i.e., the client has now unsubscribed to all resources). Although this request is identical to the first one, it is not interpreted as a wildcard subscription, because there has previously been a request on this stream for this resource type that set the :ref:`resource_names ` field. And in incremental: + - Client sends a request with :ref:`resource_names_subscribe ` unset. Server interprets this as a subscription to "*". - Client sends a request with :ref:`resource_names_subscribe ` set to "A". Server interprets this as continuing the existing subscription to "*" and adding a new subscription to "A". - Client sends a request with :ref:`resource_names_unsubscribe ` set to "*". Server interprets this as unsubscribing to "*" and continuing the existing subscription to "A". 
diff --git a/docs/root/api-v3/common_messages/common_messages.rst b/docs/root/api-v3/common_messages/common_messages.rst index ea123c074ca1e..ddfb0fe7bb0c5 100644 --- a/docs/root/api-v3/common_messages/common_messages.rst +++ b/docs/root/api-v3/common_messages/common_messages.rst @@ -20,12 +20,14 @@ Common messages ../config/core/v3/socket_option.proto ../config/core/v3/udp_socket_config.proto ../config/core/v3/substitution_format_string.proto + ../extensions/common/key_value/v3/config.proto ../extensions/common/ratelimit/v3/ratelimit.proto ../extensions/filters/common/fault/v3/fault.proto ../extensions/network/socket_interface/v3/default_socket_interface.proto ../extensions/common/matching/v3/extension_matcher.proto ../extensions/filters/common/dependency/v3/dependency.proto ../extensions/filters/common/matcher/action/v3/skip_action.proto + ../extensions/key_value/file_based/v3/config.proto ../extensions/matching/input_matchers/consistent_hashing/v3/consistent_hashing.proto ../extensions/matching/input_matchers/ip/v3/ip.proto ../extensions/matching/common_inputs/environment_variable/v3/input.proto diff --git a/docs/root/configuration/http/http_conn_man/header_sanitizing.rst b/docs/root/configuration/http/http_conn_man/header_sanitizing.rst index 47040620f7578..b7031401f6a3d 100644 --- a/docs/root/configuration/http/http_conn_man/header_sanitizing.rst +++ b/docs/root/configuration/http/http_conn_man/header_sanitizing.rst @@ -39,3 +39,4 @@ Envoy will potentially sanitize the following headers: * :ref:`x-forwarded-for ` * :ref:`x-forwarded-proto ` * :ref:`x-request-id ` +* :ref:`referer ` diff --git a/docs/root/configuration/http/http_conn_man/headers.rst b/docs/root/configuration/http/http_conn_man/headers.rst index 5ea4e7946dc84..de03c5e89ca20 100644 --- a/docs/root/configuration/http/http_conn_man/headers.rst +++ b/docs/root/configuration/http/http_conn_man/headers.rst @@ -48,6 +48,13 @@ server The *server* header will be set during encoding to the value in the 
:ref:`server_name ` option. +.. _config_http_conn_man_headers_referer: + +referer +------- + +The *referer* header will be sanitized during decoding. Multiple URLs or invalid URLs will be removed. + .. _config_http_conn_man_headers_x-client-trace-id: x-client-trace-id @@ -210,10 +217,16 @@ Given an HTTP request that has traveled through a series of zero or more proxies Envoy, the trusted client address is the earliest source IP address that is known to be accurate. The source IP address of the immediate downstream node's connection to Envoy is trusted. XFF *sometimes* can be trusted. Malicious clients can forge XFF, but the last -address in XFF can be trusted if it was put there by a trusted proxy. Alternatively, Envoy -supports :ref:`extensions ` +address in XFF can be trusted if it was put there by a trusted proxy. + +Alternatively, Envoy supports +:ref:`extensions ` for determining the *trusted client address* or original IP address. +.. note:: + + The use of such extensions cannot be mixed with *use_remote_address* nor *xff_num_trusted_hops*. + Envoy's default rules for determining the trusted client address (*before* appending anything to XFF) are: @@ -223,11 +236,8 @@ to XFF) are: node's connection to Envoy. 
In an environment where there are one or more trusted proxies in front of an edge -Envoy instance, the :ref:`XFF extension ` -can be configured via the :ref:`original_ip_detection_extensions field -` -to set the *xff_num_trusted_hops* option which controls the number of additional -addresses that are to be trusted: +Envoy instance, the *xff_num_trusted_hops* configuration option can be used to trust +additional addresses from XFF: * If *use_remote_address* is false and *xff_num_trusted_hops* is set to a value *N* that is greater than zero, the trusted client address is the (N+1)th address from the right end diff --git a/docs/root/configuration/http/http_conn_man/stats.rst b/docs/root/configuration/http/http_conn_man/stats.rst index 3a4d7a568c82f..a12c2adcc2afa 100644 --- a/docs/root/configuration/http/http_conn_man/stats.rst +++ b/docs/root/configuration/http/http_conn_man/stats.rst @@ -196,11 +196,6 @@ On the upstream side all http3 statistics are rooted at *cluster..http3.* rx_reset, Counter, Total number of reset stream frames received by Envoy tx_reset, Counter, Total number of reset stream frames transmitted by Envoy metadata_not_supported_error, Counter, Total number of metadata dropped during HTTP/3 encoding - quic_version_43, Counter, Total number of quic connections that use transport version 43. This is expected to be removed when this version is deprecated. - quic_version_46, Counter, Total number of quic connections that use transport version 46. This is expected to be removed when this version is deprecated. - quic_version_50, Counter, Total number of quic connections that use transport version 50. This is expected to be removed when this version is deprecated. - quic_version_51, Counter, Total number of quic connections that use transport version 51. This is expected to be removed when this version is deprecated. - quic_version_h3_29, Counter, Total number of quic connections that use transport version h3-29. 
This is expected to be removed when this version is deprecated. quic_version_rfc_v1, Counter, Total number of quic connections that use transport version rfc-v1. diff --git a/docs/root/configuration/http/http_filters/_include/composite.yaml b/docs/root/configuration/http/http_filters/_include/composite.yaml index d45969ba43615..f7c4c4f0dbaea 100644 --- a/docs/root/configuration/http/http_filters/_include/composite.yaml +++ b/docs/root/configuration/http/http_filters/_include/composite.yaml @@ -1,5 +1,4 @@ admin: - access_log_path: /tmp/admin_access.log address: socket_address: {address: 0.0.0.0, port_value: 9901} @@ -33,7 +32,7 @@ static_resources: name: composite typed_config: "@type": type.googleapis.com/envoy.extensions.filters.http.composite.v3.Composite - matcher: + xds_matcher: matcher_tree: input: name: request-headers diff --git a/docs/root/configuration/http/http_filters/http_filters.rst b/docs/root/configuration/http/http_filters/http_filters.rst index a87b6447a2715..cb77f65eed621 100644 --- a/docs/root/configuration/http/http_filters/http_filters.rst +++ b/docs/root/configuration/http/http_filters/http_filters.rst @@ -43,6 +43,7 @@ HTTP filters router_filter set_metadata_filter squash_filter + sxg_filter tap_filter wasm_filter diff --git a/docs/root/configuration/http/http_filters/jwt_authn_filter.rst b/docs/root/configuration/http/http_filters/jwt_authn_filter.rst index 905aaa6aecc43..b98e5f73edb91 100644 --- a/docs/root/configuration/http/http_filters/jwt_authn_filter.rst +++ b/docs/root/configuration/http/http_filters/jwt_authn_filter.rst @@ -40,6 +40,7 @@ JwtProvider * *forward*: if true, JWT will be forwarded to the upstream. * *from_headers*: extract JWT from HTTP headers. * *from_params*: extract JWT from query parameters. +* *from_cookies*: extract JWT from HTTP request cookies. * *forward_payload_header*: forward the JWT payload in the specified HTTP header. * *jwt_cache_config*: Enables JWT cache, its size can be specified by *jwt_cache_size*. 
Only valid JWT tokens are cached. diff --git a/docs/root/configuration/http/http_filters/router_filter.rst b/docs/root/configuration/http/http_filters/router_filter.rst index 20f3b365ba50f..afb61e99b25dd 100644 --- a/docs/root/configuration/http/http_filters/router_filter.rst +++ b/docs/root/configuration/http/http_filters/router_filter.rst @@ -360,6 +360,19 @@ or :ref:`regex_rewrite `, +but that configuration applies to all streams to this cluster. If set, this header will +override the cluster configuration. The value set for this header is set independently for other timeout related headers. + HTTP response headers set on downstream responses ------------------------------------------------- diff --git a/docs/root/configuration/http/http_filters/squash_filter.rst b/docs/root/configuration/http/http_filters/squash_filter.rst index 494f05f03b432..ff847f32327db 100644 --- a/docs/root/configuration/http/http_filters/squash_filter.rst +++ b/docs/root/configuration/http/http_filters/squash_filter.rst @@ -6,6 +6,8 @@ Squash Squash is an HTTP filter which enables Envoy to integrate with Squash microservices debugger. Code: https://github.com/solo-io/squash, API Docs: https://squash.solo.io/ +The Squash filter is only included in :ref:`contrib images ` + Overview -------- diff --git a/docs/root/configuration/http/http_filters/sxg_filter.rst b/docs/root/configuration/http/http_filters/sxg_filter.rst new file mode 100644 index 0000000000000..df85888621453 --- /dev/null +++ b/docs/root/configuration/http/http_filters/sxg_filter.rst @@ -0,0 +1,73 @@ + +.. _config_http_filters_sxg: + +SXG +====== + +* :ref:`v3 API reference ` +* This filter should be configured with the name *envoy.filters.http.sxg*. + +.. attention:: + + The SXG filter is experimental and is currently under active development. + +This filter generates a Signed HTTP Exchange (SXG) package from a downstream web application. 
It uses `libsxg `_ to perform the SXG packaging and signing, setting the Content-Type header to `application/signed-exchange;v=b3` and response body with the generated SXG document. + +The SXG filter is only included in :ref:`contrib images ` + +Transaction flow: + +* check accept request header for whether client can accept SXG and set a flag. ``x-envoy-client-can-accept-sxg`` (or the header defined in ``client_can_accept_sxg_header``) will be set on the request +* If ``x-envoy-should-encode-sxg`` (or the header defined in ``should_encode_sxg_header``) is present in the response headers set a flag +* If both flags are set, buffer response body until stream end and then replace response body with generated the SXG + +If there is an error generating the SXG package we fall back to the original HTML. + +For more information on Signed HTTP Exchanges see `this doc `_. + +Example configuration +--------------------- + +The following is an example configuring the filter. + +.. validated-code-block:: yaml + :type-name: envoy.extensions.filters.http.sxg.v3alpha.SXG + + cbor_url: "/.sxg/cert.cbor" + validity_url: "/.sxg/validity.msg" + certificate: + name: certificate + sds_config: + path: "/etc/envoy/sxg-certificate.yaml" + private_key: + name: private_key + sds_config: + path: "/etc/envoy/sxg-private-key.yaml" + duration: 432000s + mi_record_size: 1024 + client_can_accept_sxg_header: "x-custom-accept-sxg" + should_encode_sxg_header: "x-custom-should-encode" + header_prefix_filters: + - "x-foo-" + - "x-bar-" + +Notes +----- + +Instructions for generating a self-signed certificate and private key for testing can be found `here `__ + +Statistics +---------- + +The SXG filter outputs statistics in the *.sxg.* namespace. + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + total_client_can_accept_sxg, Counter, Total requests where client passes valid Accept header for SXG documents. 
+ total_should_sign, Counter, Total requests where downstream passes back header indicating Envoy should encocde document. + total_exceeded_max_payload_size, Counter, Total requests where response from downstream is to large. + total_signed_attempts, Counter, Total requests where SXG encoding is attempted. + total_signed_succeeded, Counter, Total requests where SXG encoding succeeds. + total_signed_failed, Counter, Total requests where SXG encoding fails. diff --git a/docs/root/configuration/listeners/stats.rst b/docs/root/configuration/listeners/stats.rst index c4767cc6c0f20..4ad0a29a9c14a 100644 --- a/docs/root/configuration/listeners/stats.rst +++ b/docs/root/configuration/listeners/stats.rst @@ -18,8 +18,10 @@ with the following statistics: downstream_cx_destroy, Counter, Total destroyed connections downstream_cx_active, Gauge, Total active connections downstream_cx_length_ms, Histogram, Connection length milliseconds + downstream_cx_transport_socket_connect_timeout, Counter, Total connections that timed out during transport socket connection negotiation downstream_cx_overflow, Counter, Total connections rejected due to enforcement of listener connection limit downstream_cx_overload_reject, Counter, Total connections rejected due to configured overload actions + downstream_global_cx_overflow, Counter, Total connections rejected due to enforcement of global connection limit downstream_pre_cx_timeout, Counter, Sockets that timed out during listener filter processing downstream_pre_cx_active, Gauge, Sockets currently undergoing listener filter processing global_cx_overflow, Counter, Total connections rejected due to enforcement of the global connection limit diff --git a/docs/root/configuration/observability/access_log/usage.rst b/docs/root/configuration/observability/access_log/usage.rst index deccf0abb0a9a..722ad1beb3999 100644 --- a/docs/root/configuration/observability/access_log/usage.rst +++ b/docs/root/configuration/observability/access_log/usage.rst @@ 
-444,6 +444,8 @@ The following command operators are supported: TCP Not implemented ("-"). +.. _config_access_log_format_dynamic_metadata: + %DYNAMIC_METADATA(NAMESPACE:KEY*):Z% HTTP :ref:`Dynamic Metadata ` info, @@ -456,9 +458,9 @@ The following command operators are supported: ``com.test.my_filter: {"test_key": "foo", "test_object": {"inner_key": "bar"}}`` * %DYNAMIC_METADATA(com.test.my_filter)% will log: ``{"test_key": "foo", "test_object": {"inner_key": "bar"}}`` - * %DYNAMIC_METADATA(com.test.my_filter:test_key)% will log: ``"foo"`` + * %DYNAMIC_METADATA(com.test.my_filter:test_key)% will log: ``foo`` * %DYNAMIC_METADATA(com.test.my_filter:test_object)% will log: ``{"inner_key": "bar"}`` - * %DYNAMIC_METADATA(com.test.my_filter:test_object:inner_key)% will log: ``"bar"`` + * %DYNAMIC_METADATA(com.test.my_filter:test_object:inner_key)% will log: ``bar`` * %DYNAMIC_METADATA(com.unknown_filter)% will log: ``-`` * %DYNAMIC_METADATA(com.test.my_filter:unknown_key)% will log: ``-`` * %DYNAMIC_METADATA(com.test.my_filter):25% will log (truncation at 25 characters): ``{"test_key": "foo", "test`` @@ -471,7 +473,13 @@ The following command operators are supported: For typed JSON logs, this operator renders a single value with string, numeric, or boolean type when the referenced key is a simple value. If the referenced key is a struct or list value, a JSON struct or list is rendered. Structs and lists may be nested. In any event, the maximum - length is ignored + length is ignored. + + .. note:: + + DYNAMIC_METADATA command operator will be deprecated in the future in favor of :ref:`METADATA` operator. + +.. 
_config_access_log_format_cluster_metadata: %CLUSTER_METADATA(NAMESPACE:KEY*):Z% HTTP @@ -484,9 +492,9 @@ The following command operators are supported: ``com.test.my_filter: {"test_key": "foo", "test_object": {"inner_key": "bar"}}`` * %CLUSTER_METADATA(com.test.my_filter)% will log: ``{"test_key": "foo", "test_object": {"inner_key": "bar"}}`` - * %CLUSTER_METADATA(com.test.my_filter:test_key)% will log: ``"foo"`` + * %CLUSTER_METADATA(com.test.my_filter:test_key)% will log: ``foo`` * %CLUSTER_METADATA(com.test.my_filter:test_object)% will log: ``{"inner_key": "bar"}`` - * %CLUSTER_METADATA(com.test.my_filter:test_object:inner_key)% will log: ``"bar"`` + * %CLUSTER_METADATA(com.test.my_filter:test_object:inner_key)% will log: ``bar`` * %CLUSTER_METADATA(com.unknown_filter)% will log: ``-`` * %CLUSTER_METADATA(com.test.my_filter:unknown_key)% will log: ``-`` * %CLUSTER_METADATA(com.test.my_filter):25% will log (truncation at 25 characters): ``{"test_key": "foo", "test`` @@ -499,7 +507,11 @@ The following command operators are supported: For typed JSON logs, this operator renders a single value with string, numeric, or boolean type when the referenced key is a simple value. If the referenced key is a struct or list value, a JSON struct or list is rendered. Structs and lists may be nested. In any event, the maximum - length is ignored + length is ignored. + + .. note:: + + CLUSTER_METADATA command operator will be deprecated in the future in favor of :ref:`METADATA` operator. .. 
_config_access_log_format_filter_state: diff --git a/docs/root/configuration/operations/overload_manager/overload_manager.rst b/docs/root/configuration/operations/overload_manager/overload_manager.rst index 53fab324e46c6..7f034f086151a 100644 --- a/docs/root/configuration/operations/overload_manager/overload_manager.rst +++ b/docs/root/configuration/operations/overload_manager/overload_manager.rst @@ -96,6 +96,10 @@ The following overload actions are supported: - Envoy will reduce the waiting period for a configured set of timeouts. See :ref:`below ` for details on configuration. + * - envoy.overload_actions.reset_high_memory_stream + - Envoy will reset expensive streams to terminate them. See + :ref:`below ` for details on configuration. + .. _config_overload_manager_reducing_timeouts: Reducing timeouts @@ -163,6 +167,85 @@ all listeners. An example configuration can be found in the :ref:`edge best practices document `. +.. _config_overload_manager_reset_streams: + +Reset Streams +^^^^^^^^^^^^^ + +.. warning:: + Resetting streams via an overload action currently only works with HTTP2. + +The ``envoy.overload_actions.reset_high_memory_stream`` overload action will reset +expensive streams. This requires `minimum_account_to_track_power_of_two` to be +configured via :ref:`buffer_factory_config +`. +To understand the memory class scheme in detail see :ref:`minimum_account_to_track_power_of_two +` + +As an example, here is a partial Overload Manager configuration with minimum +threshold for tracking and a single overload action entry that resets streams: + +.. code-block:: yaml + + buffer_factory_config: + minimum_account_to_track_power_of_two: 20 + actions: + name: "envoy.overload_actions.reset_high_memory_stream" + triggers: + - name: "envoy.resource_monitors.fixed_heap" + scaled: + scaling_threshold: 0.85 + saturation_threshold: 0.95 + ... + +We will only track streams using >= +:math:`2^minimum_account_to_track_power_of_two` worth of allocated memory in +buffers. 
In this case, by setting the `minimum_account_to_track_power_of_two` +to `20` we will track streams using >= 1MiB since :math:`2^20` is 1MiB. Streams +using >= 1MiB will be classified into 8 power of two sized buckets. Currently, +the number of buckets is hardcoded to 8. For this example, the buckets are as +follows: + +.. list-table:: + :header-rows: 1 + :widths: 1, 2 + + * - Bucket index + - Contains streams using + * - 0 + - [1MiB,2MiB) + * - 1 + - [2MiB,4MiB) + * - 2 + - [4MiB,8MiB) + * - 3 + - [8MiB,16MiB) + * - 4 + - [16MiB,32MiB) + * - 5 + - [32MiB,64MiB) + * - 6 + - [64MiB,128MiB) + * - 7 + - >= 128MiB + +The above configuration also configures the overload manager to reset our tracked +streams based on heap usage as a trigger. When the heap usage is less than 85%, +no streams will be reset. When heap usage is at or above 85%, we start to +reset buckets according to the strategy described below. When the heap +usage is at 95% all streams using >= 1MiB memory are eligible for reset. + +Given that there are only 8 buckets, we partition the space with a gradation of +:math:`gradation = (saturation_threshold - scaling_threshold)/8`. Hence at 85% +heap usage we reset streams in the last bucket e.g. those using `>= 128MiB`. At +:math:`85% + 1 * gradation` heap usage we reset streams in the last two buckets +e.g. those using `>= 64MiB`. And so forth as the heap usage is higher. + +It's expected that the first few gradations shouldn't trigger anything, unless +there's something seriously wrong e.g. in this example streams using `>= +128MiB` in buffers. 
+ + Statistics ---------- diff --git a/docs/root/configuration/other_protocols/thrift_filters/router_filter.rst b/docs/root/configuration/other_protocols/thrift_filters/router_filter.rst index dae0be24d7429..97c4eb4161290 100644 --- a/docs/root/configuration/other_protocols/thrift_filters/router_filter.rst +++ b/docs/root/configuration/other_protocols/thrift_filters/router_filter.rst @@ -23,6 +23,7 @@ The filter outputs generic routing error statistics in the *thrift. unknown_cluster, Counter, Total requests with a route that has an unknown cluster. upstream_rq_maintenance_mode, Counter, Total requests with a destination cluster in maintenance mode. no_healthy_upstream, Counter, Total requests with no healthy upstream endpoints available. + shadow_request_submit_failure, Counter, Total shadow requests that failed to be submitted. The filter is also responsible for cluster-level statistics derived from routed upstream clusters. diff --git a/docs/root/configuration/overview/examples.rst b/docs/root/configuration/overview/examples.rst index da4c30bebd1d9..b714f65e973f2 100644 --- a/docs/root/configuration/overview/examples.rst +++ b/docs/root/configuration/overview/examples.rst @@ -2,7 +2,7 @@ Examples -------- Below we will use YAML representation of the config protos and a running example -of a service proxying HTTP from 127.0.0.1:10000 to 127.0.0.2:1234. +of a service proxying HTTP from 127.0.0.1:10000 to 127.0.0.1:1234. 
Static ^^^^^^ @@ -152,7 +152,7 @@ In the above example, the EDS management server could then return a proto encodi - endpoint: address: socket_address: - address: 127.0.0.2 + address: 127.0.0.1 port_value: 1234 @@ -300,7 +300,7 @@ The management server could respond to EDS requests with: - endpoint: address: socket_address: - address: 127.0.0.2 + address: 127.0.0.1 port_value: 1234 Special YAML usage diff --git a/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst b/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst index 17b5e2da387ce..a7914866c29d6 100644 --- a/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst +++ b/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst @@ -114,7 +114,8 @@ HTTP/3 protocol stats are global with the following statistics: :header: Name, Type, Description :widths: 1, 1, 2 - .quic_connection_close_error_code_, Counter, A collection of counters that are lazily initialized to record each QUIC connection close's error code. + upstream..quic_connection_close_error_code_, Counter, A collection of counters that are lazily initialized to record each QUIC connection close's error code. + upstream..quic_reset_stream_error_code_, Counter, A collection of counters that are lazily initialized to record each QUIC stream reset error code. 
Health check statistics diff --git a/docs/root/extending/extending.rst b/docs/root/extending/extending.rst index aaf83ce7b516d..57134f2ec4fe7 100644 --- a/docs/root/extending/extending.rst +++ b/docs/root/extending/extending.rst @@ -19,7 +19,7 @@ types including: * :ref:`Stat sinks ` * :ref:`Tracers ` * :ref:`Request ID ` -* :ref:`Transport sockets ` +* Transport sockets * BoringSSL private key methods * :ref:`Watchdog action ` * :ref:`Internal redirect policy ` diff --git a/docs/root/faq/debugging/why_is_envoy_sending_http2_resets.rst b/docs/root/faq/debugging/why_is_envoy_sending_http2_resets.rst index 8bc05dc87adb1..724d7871f2cce 100644 --- a/docs/root/faq/debugging/why_is_envoy_sending_http2_resets.rst +++ b/docs/root/faq/debugging/why_is_envoy_sending_http2_resets.rst @@ -17,6 +17,6 @@ from the file "source/common/http/http2/codec_impl.cc" of the form for example: `invalid http2: Invalid HTTP header field was received: frame type: 1, stream: 1, name: [content-length], value: [3]` -You can also check :ref:`HTTP/2 stats``: in many cases where +You can also check :ref:`HTTP/2 stats `: in many cases where Envoy resets streams, for example if there are more headers than allowed by configuration or flood detection kicks in, http2 counters will be incremented as the streams are reset. diff --git a/docs/root/faq/windows/win_not_supported_features.rst b/docs/root/faq/windows/win_not_supported_features.rst index 21685e8c97b0b..8e002a1f182c6 100644 --- a/docs/root/faq/windows/win_not_supported_features.rst +++ b/docs/root/faq/windows/win_not_supported_features.rst @@ -8,6 +8,7 @@ The most notable features that are not supported on Windows are: * :ref:`Tracers ` * :ref:`Original Src HTTP Filter `. * :ref:`Hot restart ` +* :ref:`Signed Exchange Filter ` There are certain Envoy features that require newer versions of Windows. These features explicitly document the required version. 
diff --git a/docs/root/intro/arch_overview/advanced/matching/_include/complicated.yaml b/docs/root/intro/arch_overview/advanced/matching/_include/complicated.yaml index 9efc4cbf217e8..6111adfd23eea 100644 --- a/docs/root/intro/arch_overview/advanced/matching/_include/complicated.yaml +++ b/docs/root/intro/arch_overview/advanced/matching/_include/complicated.yaml @@ -29,7 +29,7 @@ static_resources: percentage: numerator: 0 denominator: HUNDRED - matcher: + xds_matcher: # The top level matcher is a matcher tree which conceptually selects one of several subtrees. matcher_tree: input: diff --git a/docs/root/intro/arch_overview/advanced/matching/_include/request_response.yaml b/docs/root/intro/arch_overview/advanced/matching/_include/request_response.yaml index bf4721e48e24f..5fc3a5c3e8ecb 100644 --- a/docs/root/intro/arch_overview/advanced/matching/_include/request_response.yaml +++ b/docs/root/intro/arch_overview/advanced/matching/_include/request_response.yaml @@ -29,7 +29,7 @@ static_resources: percentage: numerator: 0 denominator: HUNDRED - matcher: + xds_matcher: matcher_list: matchers: - predicate: diff --git a/docs/root/intro/arch_overview/advanced/matching/_include/simple.yaml b/docs/root/intro/arch_overview/advanced/matching/_include/simple.yaml index 836deb8191825..1433fa75d1089 100644 --- a/docs/root/intro/arch_overview/advanced/matching/_include/simple.yaml +++ b/docs/root/intro/arch_overview/advanced/matching/_include/simple.yaml @@ -29,7 +29,7 @@ static_resources: percentage: numerator: 0 denominator: HUNDRED - matcher: + xds_matcher: matcher_tree: input: name: request-headers diff --git a/docs/root/intro/arch_overview/http/http3.rst b/docs/root/intro/arch_overview/http/http3.rst index 6c1da39eca24d..a5548120c6f38 100644 --- a/docs/root/intro/arch_overview/http/http3.rst +++ b/docs/root/intro/arch_overview/http/http3.rst @@ -50,4 +50,4 @@ Either configuring HTTP/3 explicitly on, or using the auto_http option to use HT See :ref:`here ` for more information 
about HTTP/3 connection pooling, including detailed information of where QUIC will be used, and how it fails over to TCP when QUIC use is configured to be optional. -An example upstream HTTP/3 configuration file can be found :repo:`here . +An example upstream HTTP/3 configuration file can be found :repo:`here `. diff --git a/docs/root/intro/arch_overview/intro/threading_model.rst b/docs/root/intro/arch_overview/intro/threading_model.rst index ca83cb92e92c5..18f2e822498f1 100644 --- a/docs/root/intro/arch_overview/intro/threading_model.rst +++ b/docs/root/intro/arch_overview/intro/threading_model.rst @@ -24,3 +24,7 @@ to have Envoy forcibly balance connections between worker threads. To support th Envoy allows for different types of :ref:`connection balancing ` to be configured on each :ref:`listener `. + +On Windows the kernel is not able to balance the connections properly with the async IO model that Envoy is using. +Until this is fixed by the platform, Envoy will enforce listener connection balancing on Windows. This allows us to +balance connections between different worker threads. This behavior comes with a performance penalty. diff --git a/docs/root/intro/arch_overview/operations/draining.rst b/docs/root/intro/arch_overview/operations/draining.rst index 307f0b3556e3d..7b03a35ff4d56 100644 --- a/docs/root/intro/arch_overview/operations/draining.rst +++ b/docs/root/intro/arch_overview/operations/draining.rst @@ -24,10 +24,12 @@ for some duration of time prior to server shutdown, use :ref:`drain_listeners `. By default, Envoy -will discourage requests for some period of time (as determined by :option:`--drain-time-s`). -The behaviour of request discouraging is determined by the drain manager. +To add a graceful drain period prior to listeners being closed, use the query +parameter :ref:`drain_listeners?graceful `.
+By default, Envoy will discourage requests for some period of time (as +determined by :option:`--drain-time-s`) but continue accepting new connections +until the drain timeout. The behaviour of request discouraging is determined by +the drain manager. Note that although draining is a per-listener concept, it must be supported at the network filter level. Currently the only filters that support graceful draining are diff --git a/docs/root/intro/arch_overview/operations/hot_restart.rst b/docs/root/intro/arch_overview/operations/hot_restart.rst index 9802609fca2a7..27b2fad02fd1e 100644 --- a/docs/root/intro/arch_overview/operations/hot_restart.rst +++ b/docs/root/intro/arch_overview/operations/hot_restart.rst @@ -9,10 +9,11 @@ Envoy can fully reload itself (both code and configuration) without dropping exi during the :ref:`drain process `. The hot restart functionality has the following general architecture: -* Statistics and some locks are kept in a shared memory region. This means that gauges will be - consistent across both processes as restart is taking place. * The two active processes communicate with each other over unix domain sockets using a basic RPC - protocol. + protocol. All counters are sent from the old process to the new process over the unix domain, and + gauges are transported except those marked with `NeverImport`. After hot restart is finished, the + gauges transported from the old process will be cleaned up, but special gauges like + :ref:`server.hot_restart_generation statistic ` are retained. * The new process fully initializes itself (loads the configuration, does an initial service discovery and health checking phase, etc.) before it asks for copies of the listen sockets from the old process.
The new process starts listening and then tells the old process to start diff --git a/docs/root/intro/arch_overview/security/_include/ssl.yaml b/docs/root/intro/arch_overview/security/_include/ssl.yaml index 205142dcbb019..6f666e4d92a55 100644 --- a/docs/root/intro/arch_overview/security/_include/ssl.yaml +++ b/docs/root/intro/arch_overview/security/_include/ssl.yaml @@ -38,7 +38,7 @@ static_resources: - endpoint: address: socket_address: - address: 127.0.0.2 + address: 127.0.0.1 port_value: 1234 transport_socket: name: envoy.transport_sockets.tls diff --git a/docs/root/intro/arch_overview/security/ssl.rst b/docs/root/intro/arch_overview/security/ssl.rst index 4c770e00341b4..e1192833232c9 100644 --- a/docs/root/intro/arch_overview/security/ssl.rst +++ b/docs/root/intro/arch_overview/security/ssl.rst @@ -77,7 +77,7 @@ Example configuration */etc/ssl/certs/ca-certificates.crt* is the default path for the system CA bundle on Debian systems. :ref:`trusted_ca ` along with :ref:`match_subject_alt_names ` -makes Envoy verify the server identity of *127.0.0.2:1234* as "foo" in the same way as e.g. cURL +makes Envoy verify the server identity of *127.0.0.1:1234* as "foo" in the same way as e.g. cURL does on standard Debian installations. Common paths for system CA bundles on Linux and BSD are: * /etc/ssl/certs/ca-certificates.crt (Debian/Ubuntu/Gentoo etc.) diff --git a/docs/root/intro/arch_overview/security/threat_model.rst b/docs/root/intro/arch_overview/security/threat_model.rst index 66b906f1cb281..93aa6530b545e 100644 --- a/docs/root/intro/arch_overview/security/threat_model.rst +++ b/docs/root/intro/arch_overview/security/threat_model.rst @@ -83,6 +83,11 @@ and do not qualify for treatment under the threat model below. As a consequence, with this model in mind. Security issues related to core code will usually trigger the security release process as described in this document. +.. 
note:: + + :ref:`contrib ` extensions are noted below and are not officially covered by + the threat model or the Envoy security team. All indications below are best effort. + + The following extensions are intended to be hardened against untrusted downstream and upstreams: .. include:: secpos_robust_to_untrusted_downstream_and_upstream.rst diff --git a/docs/root/intro/arch_overview/upstream/connection_pooling.rst b/docs/root/intro/arch_overview/upstream/connection_pooling.rst index 0c1a26d79dca9..147171f498e13 100644 --- a/docs/root/intro/arch_overview/upstream/connection_pooling.rst +++ b/docs/root/intro/arch_overview/upstream/connection_pooling.rst @@ -57,9 +57,9 @@ Automatic protocol selection ---------------------------- For Envoy acting as a forward proxy, the preferred configuration is the -`AutoHttpConfig ` +:ref:`AutoHttpConfig ` , configured via -`http_protocol_options `. +:ref:`http_protocol_options `. By default it will use TCP and ALPN to select the best available protocol of HTTP/2 and HTTP/1.1. .. _arch_overview_http3_upstream: diff --git a/docs/root/operations/cli.rst b/docs/root/operations/cli.rst index 34e50110842a9..72d243f0868d3 100644 --- a/docs/root/operations/cli.rst +++ b/docs/root/operations/cli.rst @@ -31,13 +31,6 @@ following are the command line options that Envoy supports. ./envoy -c bootstrap.yaml --config-yaml "node: {id: 'node1'}" -.. option:: --bootstrap-version - - *(optional)* The API version to load the bootstrap as. The value should be a single integer, e.g. - to parse the bootstrap configuration as V3, specify ``--bootstrap-version 3``. If unset, Envoy will - attempt to load the bootstrap as the previous API version and upgrade it to the latest. If that fails, - Envoy will attempt to load the configuration as the latest version. - ..
option:: --mode *(optional)* One of the operating modes for Envoy: diff --git a/docs/root/start/install.rst b/docs/root/start/install.rst index 752075ad4f81a..30d6a17c03503 100644 --- a/docs/root/start/install.rst +++ b/docs/root/start/install.rst @@ -13,47 +13,50 @@ getting your Envoy proxy up and running. Install Envoy on Debian GNU/Linux ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -You can `install Envoy on Debian `_ -using `Get Envoy `__ until `official packages exist `_. +You can `install Envoy on Debian using packages created by Tetrate `_ +until `official packages exist `_. .. code-block:: console $ sudo apt update - $ sudo apt install apt-transport-https ca-certificates curl gnupg2 software-properties-common - $ curl -sL 'https://getenvoy.io/gpg' | sudo gpg --dearmor -o /usr/share/keyrings/getenvoy-keyring.gpg + $ sudo apt install debian-keyring debian-archive-keyring apt-transport-https curl lsb-release + $ curl -sL 'https://deb.dl.getenvoy.io/public/gpg.8115BA8E629CC074.key' | sudo gpg --dearmor -o /usr/share/keyrings/getenvoy-keyring.gpg # Verify the keyring - this should yield "OK" - $ echo 1a2f6152efc6cc39e384fb869cdf3cc3e4e1ac68f4ad8f8f114a7c58bb0bea01 /usr/share/keyrings/getenvoy-keyring.gpg | sha256sum --check - $ echo "deb [arch=amd64 signed-by=/usr/share/keyrings/getenvoy-keyring.gpg] https://dl.bintray.com/tetrate/getenvoy-deb $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/getenvoy.list + $ echo a077cb587a1b622e03aa4bf2f3689de14658a9497a9af2c427bba5f4cc3c4723 /usr/share/keyrings/getenvoy-keyring.gpg | sha256sum --check + $ echo "deb [arch=amd64 signed-by=/usr/share/keyrings/getenvoy-keyring.gpg] https://deb.dl.getenvoy.io/public/deb/debian $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/getenvoy.list $ sudo apt update $ sudo apt install getenvoy-envoy Install Envoy on Ubuntu Linux ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -You can `install Envoy on Ubuntu `_ -using `Get Envoy `__ until `official packages exist `_. 
+You can `install Envoy on Ubuntu using packages created by Tetrate `_ +until `official packages exist `_. .. code-block:: console $ sudo apt update - $ sudo apt install apt-transport-https ca-certificates curl gnupg-agent software-properties-common - $ curl -sL 'https://getenvoy.io/gpg' | sudo gpg --dearmor -o /usr/share/keyrings/getenvoy-keyring.gpg + $ sudo apt install apt-transport-https gnupg2 curl lsb-release + $ curl -sL 'https://deb.dl.getenvoy.io/public/gpg.8115BA8E629CC074.key' | sudo gpg --dearmor -o /usr/share/keyrings/getenvoy-keyring.gpg # Verify the keyring - this should yield "OK" - $ echo 1a2f6152efc6cc39e384fb869cdf3cc3e4e1ac68f4ad8f8f114a7c58bb0bea01 /usr/share/keyrings/getenvoy-keyring.gpg | sha256sum --check - $ echo "deb [arch=amd64 signed-by=/usr/share/keyrings/getenvoy-keyring.gpg] https://dl.bintray.com/tetrate/getenvoy-deb $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/getenvoy.list + $ echo a077cb587a1b622e03aa4bf2f3689de14658a9497a9af2c427bba5f4cc3c4723 /usr/share/keyrings/getenvoy-keyring.gpg | sha256sum --check + $ echo "deb [arch=amd64 signed-by=/usr/share/keyrings/getenvoy-keyring.gpg] https://deb.dl.getenvoy.io/public/deb/ubuntu $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/getenvoy.list $ sudo apt update $ sudo apt install -y getenvoy-envoy Install Envoy on RPM-based distros ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -You can `install Envoy on Centos/Redhat Enterprise Linux (RHEL) `_ -using `Get Envoy `__ until `official packages exist `_. +You can install Envoy on Centos/Redhat Enterprise Linux (RHEL) using `packages created by Tetrate `_ +until `official packages exist `_. .. 
code-block:: console $ sudo yum install yum-utils - $ sudo yum-config-manager --add-repo https://getenvoy.io/linux/rpm/tetrate-getenvoy.repo + $ sudo rpm --import 'https://rpm.dl.getenvoy.io/public/gpg.CF716AF503183491.key' + $ curl -sL 'https://rpm.dl.getenvoy.io/public/config.rpm.txt?distro=el&codename=7' > /tmp/tetrate-getenvoy-rpm-stable.repo + $ sudo yum-config-manager --add-repo '/tmp/tetrate-getenvoy-rpm-stable.repo' + $ sudo yum makecache --disablerepo='*' --enablerepo='tetrate-getenvoy-rpm-stable' $ sudo yum install getenvoy-envoy .. _start_install_macosx: @@ -107,6 +110,20 @@ The following commands will pull and show the Envoy version of current images. $ docker pull envoyproxy/|envoy_distroless_docker_image| $ docker run --rm envoyproxy/|envoy_distroless_docker_image| --version +.. _install_contrib: + +Contrib images +~~~~~~~~~~~~~~ + +As described in `this document `_, +the Envoy project allows extensions to enter the repository as "contrib" extensions. The requirements +for such extensions are lower, and as such they are only available by default in special images. +The `envoyproxy/envoy-contrib `_ image +contains all contrib extensions on top of an Ubuntu base. The +`envoyproxy/envoy-contrib-debug `_ +image contains all contrib extensions on top of an Ubuntu base as well as debug symbols. Throughout +the documentation, extensions are clearly marked as being a contrib extension or a core extension. + .. _install_binaries: Pre-built Envoy Docker images @@ -137,6 +154,12 @@ The following table shows the available Docker images - |DOCKER_IMAGE_TAG_NAME| - - + * - `envoyproxy/envoy-contrib `_ + - Release :ref:`contrib ` binary with symbols stripped on top of an Ubuntu Bionic base. + - |DOCKER_IMAGE_TAG_NAME| + - |DOCKER_IMAGE_TAG_NAME| + - + - * - `envoyproxy/envoy-distroless `_ - Release binary with symbols stripped on top of a distroless base. 
- |DOCKER_IMAGE_TAG_NAME| @@ -161,12 +184,24 @@ The following table shows the available Docker images - |DOCKER_IMAGE_TAG_NAME| - - + * - `envoyproxy/envoy-contrib-debug `_ + - Release :ref:`contrib ` binary with debug symbols on top of an Ubuntu Bionic base. + - |DOCKER_IMAGE_TAG_NAME| + - |DOCKER_IMAGE_TAG_NAME| + - + - * - `envoyproxy/envoy-dev `_ - Release binary with symbols stripped on top of an Ubuntu Bionic base. - - - latest - latest + * - `envoyproxy/envoy-contrib-dev `_ + - Release :ref:`contrib ` binary with symbols stripped on top of an Ubuntu Bionic base. + - + - + - latest + - latest * - `envoyproxy/envoy-distroless-dev `_ - Release binary with symbols stripped on top of a distroless base. - @@ -185,6 +220,12 @@ The following table shows the available Docker images - - latest - latest + * - `envoyproxy/envoy-contrib-debug-dev `_ + - Release :ref:`contrib ` binary with debug symbols on top of an Ubuntu Bionic base. + - + - + - latest + - latest * - `envoyproxy/envoy-windows-dev `_ - Release binary with symbols stripped on top of a Windows Server 1809 base. Includes build tools. - diff --git a/docs/root/start/sandboxes/cache.rst b/docs/root/start/sandboxes/cache.rst index 181b36c657448..7237a3fd46865 100644 --- a/docs/root/start/sandboxes/cache.rst +++ b/docs/root/start/sandboxes/cache.rst @@ -50,8 +50,8 @@ Change to the ``examples/cache`` directory. Name Command State Ports ---------------------------------------------------------------------------------------------- cache_front-envoy_1 /docker-entrypoint.sh /bin ... Up 10000/tcp, 0.0.0.0:8000->8000/tcp - cache_service1_1 /bin/sh -c /usr/local/bin/ ... Up 10000/tcp, 8000/tcp - cache_service2_1 /bin/sh -c /usr/local/bin/ ... 
Up 10000/tcp, 8000/tcp + cache_service1_1 python3 /code/service.py Up + cache_service2_1 python3 /code/service.py Up Step 2: Test Envoy's HTTP caching capabilities ********************************************** diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 0a4f5b399f64f..a13ea3b633676 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -5,6 +5,20 @@ Incompatible Behavior Changes ----------------------------- *Changes that are expected to cause an incompatibility if applicable; deployment changes are likely required* +* config: the ``--bootstrap-version`` CLI flag has been removed, Envoy has only been able to accept v3 + bootstrap configurations since 1.18.0. +* contrib: the :ref:`squash filter ` has been moved to + :ref:`contrib images `. +* contrib: the :ref:`kafka broker filter ` has been moved to + :ref:`contrib images `. +* contrib: the :ref:`RocketMQ proxy filter ` has been moved to + :ref:`contrib images `. +* contrib: the :ref:`Postgres proxy filter ` has been moved to + :ref:`contrib images `. +* contrib: the :ref:`MySQL proxy filter ` has been moved to + :ref:`contrib images `. +* ext_authz: fixed skipping authentication when returning either a direct response or a redirect. This behavior can be temporarily reverted by setting the ``envoy.reloadable_features.http_ext_authz_do_not_skip_direct_response_and_redirect`` runtime guard to false. + Minor Behavior Changes ---------------------- *Changes that may cause incompatibilities for some users, but should not for most* @@ -19,27 +33,46 @@ Minor Behavior Changes * http: correct the use of the ``x-forwarded-proto`` header and the ``:scheme`` header. Where they differ (which is rare) ``:scheme`` will now be used for serving redirect URIs and cached content. This behavior can be reverted by setting runtime guard ``correct_scheme_and_xfp`` to false. +* http: reject requests with #fragment in the URI path. 
The fragment is not allowed to be part of request + URI according to RFC3986 (3.5), RFC7230 (5.1) and RFC 7540 (8.1.2.3). Rejection of requests can be changed + to stripping the #fragment instead by setting the runtime guard ``envoy.reloadable_features.http_reject_path_with_fragment`` + to false. This behavior can further be changed to the deprecated behavior of keeping the fragment by setting the runtime guard + ``envoy.reloadable_features.http_strip_fragment_from_path_unsafe_if_disabled``. This runtime guard must only be set + to false when existing non-compliant traffic relies on #fragment in URI. When this option is enabled, Envoy request + authorization extensions may be bypassed. This override and its associated behavior will be decommissioned after the standard deprecation period. * http: set the default :ref:`lazy headermap threshold ` to 3, which defines the minimal number of headers in a request/response/trailers required for using a - dictionary in addition to the list. Setting the `envoy.http.headermap.lazy_map_min_size` runtime + dictionary in addition to the list. Setting the ``envoy.http.headermap.lazy_map_min_size`` runtime feature to a non-negative number will override the default value. +* http: stop processing pending H/2 frames if connection transitioned to a closed state. This behavior can be temporarily reverted by setting the ``envoy.reloadable_features.skip_dispatching_frames_for_closed_connection`` to false. * listener: added the :ref:`enable_reuse_port ` field and changed the default for reuse_port from false to true, as the feature is now well supported on the majority of production Linux kernels in use. The default change is aware of hot restart, as otherwise the change would not be backwards compatible between restarts. This means that hot restarting on to a new binary will retain the default of false until the binary undergoes a full restart. 
To retain the previous behavior, either explicitly set the new configuration - field to false, or set the runtime feature flag `envoy.reloadable_features.listener_reuse_port_default_enabled` + field to false, or set the runtime feature flag ``envoy.reloadable_features.listener_reuse_port_default_enabled`` to false. As part of this change, the use of reuse_port for TCP listeners on both macOS and Windows has been disabled due to suboptimal behavior. See the field documentation for more information. +* listener: destroy per network filter chain stats when a network filter chain is removed during the listener in place update. +* quic: enables IETF connection migration. This feature requires stable UDP packet routing in the L4 load balancer with the same first-4-bytes in connection id. It can be turned off by setting runtime guard ``envoy.reloadable_features.FLAGS_quic_reloadable_flag_quic_connection_migration_use_new_cid_v2`` to false. Bug Fixes --------- *Changes expected to improve the state of the world and are unlikely to have negative effects* -* access log: fix `%UPSTREAM_CLUSTER%` when used in http upstream access logs. Previously, it was always logging as an unset value. +* access log: fix ``%UPSTREAM_CLUSTER%`` when used in http upstream access logs. Previously, it was always logging as an unset value. +* access log: fix ``%UPSTREAM_CLUSTER%`` when used in http upstream access logs. Previously, it was always logging as an unset value. +* aws request signer: fix the AWS Request Signer extension to correctly normalize the path and query string to be signed according to AWS' guidelines, so that the hash on the server side matches. See `AWS SigV4 documentation `_. * cluster: delete pools when they're idle to fix unbounded memory use when using PROXY protocol upstream with tcp_proxy. This behavior can be temporarily reverted by setting the ``envoy.reloadable_features.conn_pool_delete_when_idle`` runtime guard to false.
+* cluster: finish cluster warming even if hosts are removed before health check initialization. This only affected clusters with :ref:`ignore_health_on_host_removal `. +* dynamic forward proxy: fixing a validation bug where san and sni checks were not applied when setting :ref:`http_protocol_options ` via :ref:`typed_extension_protocol_options `. +* ext_authz: fix the ext_authz filter to correctly merge multiple same headers using the ',' as separator in the check request to the external authorization service. +* ext_authz: the network ext_authz filter now correctly sets dynamic metadata returned by the authorization service for non-OK responses. This behavior now matches the http ext_authz filter. +* hcm: remove deprecation for :ref:`xff_num_trusted_hops ` and forbid mixing ip detection extensions with old related knobs. +* http: limit use of deferred resets in the http2 codec to server-side connections. Use of deferred reset for client connections can result in incorrect behavior and performance problems. +* listener: fixed an issue on Windows where connections are not handled by all worker threads. * xray: fix the AWS X-Ray tracer bug where span's error, fault and throttle information was not reported properly as per the `AWS X-Ray documentation `_. Before this fix, server error was reported under 'annotations' section of the segment data. Removed Config or Runtime @@ -49,17 +82,35 @@ Removed Config or Runtime * http: removed ``envoy.reloadable_features.http_upstream_wait_connect_response`` runtime guard and legacy code paths. * http: removed ``envoy.reloadable_features.allow_preconnect`` runtime guard and legacy code paths. * listener: removed ``envoy.reloadable_features.disable_tls_inspector_injection`` runtime guard and legacy code paths. +* ocsp: removed ``envoy.reloadable_features.check_ocsp_policy deprecation`` runtime guard and legacy code paths.
+* ocsp: removed ``envoy.reloadable_features.require_ocsp_response_for_must_staple_certs deprecation`` and legacy code paths. +* quic: removed ``envoy.reloadable_features.prefer_quic_kernel_bpf_packet_routing`` runtime guard. New Features ------------ +* access_log: added :ref:`METADATA` token to handle all types of metadata (DYNAMIC, CLUSTER, ROUTE). * bootstrap: added :ref:`inline_headers ` in the bootstrap to make custom inline headers bootstrap configurable. +* contrib: added new :ref:`contrib images ` which contain contrib extensions. +* grpc reverse bridge: added a new :ref:`option ` to support streaming response bodies when withholding gRPC frames from the upstream. * http: added :ref:`string_match ` in the header matcher. +* http: added :ref:`x-envoy-upstream-stream-duration-ms ` that allows configuring the max stream duration via a request header. * http: added support for :ref:`max_requests_per_connection ` for both upstream and downstream connections. - +* http: sanitizing the referer header as documented :ref:`here `. This feature can be temporarily turned off by setting runtime guard ``envoy.reloadable_features.sanitize_http_header_referer`` to false. * jwt_authn: added support for :ref:`Jwt Cache ` and its size can be specified by :ref:`jwt_cache_size `. +* jwt_authn: added support for extracting JWTs from request cookies using :ref:`from_cookies `. +* listener: new listener metric ``downstream_cx_transport_socket_connect_timeout`` to track transport socket timeouts. +* matcher: added :ref:`invert ` for inverting the match result in the metadata matcher. +* overload: add a new overload action that resets streams using a lot of memory. To enable the tracking of allocated bytes in buffers that a stream is using we need to configure the minimum threshold for tracking via :ref:`buffer_factory_config `.
We have an overload action ``Envoy::Server::OverloadActionNameValues::ResetStreams`` that takes advantage of the tracking to reset the most expensive stream first. +* rbac: added :ref:`destination_port_range ` for matching range of destination ports. +* route config: added :ref:`dynamic_metadata ` for routing based on dynamic metadata. +* sxg_filter: added filter to transform response to SXG package to :ref:`contrib images `. This can be enabled by setting :ref:`SXG ` configuration. +* thrift_proxy: added support for :ref:`mirroring requests `. Deprecated ---------- + +* api: the :ref:`matcher ` field has been deprecated in favor of + :ref:`matcher ` in order to break a build dependency. * cluster: :ref:`max_requests_per_connection ` is deprecated in favor of :ref:`max_requests_per_connection `. * http: the HeaderMatcher fields :ref:`exact_match `, :ref:`safe_regex_match `, :ref:`prefix_match `, :ref:`suffix_match ` and @@ -67,4 +118,3 @@ Deprecated * listener: :ref:`reuse_port ` has been deprecated in favor of :ref:`enable_reuse_port `. At the same time, the default has been changed from false to true. See above for more information. - diff --git a/docs/root/version_history/v1.15.5.rst b/docs/root/version_history/v1.15.5.rst new file mode 100644 index 0000000000000..ca0fcbab5d56f --- /dev/null +++ b/docs/root/version_history/v1.15.5.rst @@ -0,0 +1,16 @@ +1.15.5 (May 11, 2021) +======================= + +Changes +------- + +Removed Config or Runtime +------------------------- +*Normally occurs at the end of the* :ref:`deprecation period ` + +New Features +------------ +* http: added the ability to :ref:`unescape slash sequences ` in the path. Requests with unescaped slashes can be proxied, rejected or redirected to the new unescaped path. By default this feature is disabled. The default behavior can be overridden through :ref:`http_connection_manager.path_with_escaped_slashes_action ` runtime variable. 
This action can be selectively enabled for a portion of requests by setting the :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling ` runtime variable. + +Deprecated +---------- diff --git a/docs/root/version_history/v1.16.4.rst b/docs/root/version_history/v1.16.4.rst new file mode 100644 index 0000000000000..4864d77cc3a7b --- /dev/null +++ b/docs/root/version_history/v1.16.4.rst @@ -0,0 +1,26 @@ +1.16.4 (May 11, 2021) +======================= + +Incompatible Behavior Changes +----------------------------- +*Changes that are expected to cause an incompatibility if applicable; deployment changes are likely required* + +Minor Behavior Changes +---------------------- +*Changes that may cause incompatibilities for some users, but should not for most* + +Bug Fixes +--------- +*Changes expected to improve the state of the world and are unlikely to have negative effects* + +Removed Config or Runtime +------------------------- +*Normally occurs at the end of the* :ref:`deprecation period ` + +New Features +------------ + +* http: added the ability to :ref:`unescape slash sequences` in the path. Requests with unescaped slashes can be proxied, rejected or redirected to the new unescaped path. By default this feature is disabled. The default behavior can be overridden through :ref:`http_connection_manager.path_with_escaped_slashes_action` runtime variable. This action can be selectively enabled for a portion of requests by setting the :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling` runtime variable. 
+ +Deprecated +---------- diff --git a/docs/root/version_history/v1.16.5.rst b/docs/root/version_history/v1.16.5.rst new file mode 100644 index 0000000000000..fcf1017a2cd41 --- /dev/null +++ b/docs/root/version_history/v1.16.5.rst @@ -0,0 +1,34 @@ +1.16.5 (Aug 24, 2021) +======================= + +Incompatible Behavior Changes +----------------------------- +*Changes that are expected to cause an incompatibility if applicable; deployment changes are likely required* + +Minor Behavior Changes +---------------------- +*Changes that may cause incompatibilities for some users, but should not for most* + +* http: reject requests with #fragment in the URI path. The fragment is not allowed to be part of request + URI according to RFC3986 (3.5), RFC7230 (5.1) and RFC 7540 (8.1.2.3). Rejection of requests can be changed + to stripping the #fragment instead by setting the runtime guard `envoy.reloadable_features.http_reject_path_with_fragment` + to false. This behavior can further be changed to the deprecated behavior of keeping the fragment by setting the runtime guard + `envoy.reloadable_features.http_strip_fragment_from_path_unsafe_if_disabled`. This runtime guard must only be set + to false when existing non-compliant traffic relies on #fragment in URI. When this option is enabled, Envoy request + authorization extensions may be bypassed. This override and its associated behavior will be decommissioned after the standard deprecation period. + +Bug Fixes +--------- +*Changes expected to improve the state of the world and are unlikely to have negative effects* + +* ext_authz: fix the ext_authz filter to correctly merge multiple same headers using the ',' as separator in the check request to the external authorization service. 
+ +Removed Config or Runtime +------------------------- +*Normally occurs at the end of the* :ref:`deprecation period ` + +New Features +------------ + +Deprecated +---------- diff --git a/docs/root/version_history/v1.17.0.rst b/docs/root/version_history/v1.17.0.rst index ace3ea40fd979..1d612e7573124 100644 --- a/docs/root/version_history/v1.17.0.rst +++ b/docs/root/version_history/v1.17.0.rst @@ -5,7 +5,7 @@ Incompatible Behavior Changes ----------------------------- *Changes that are expected to cause an incompatibility if applicable; deployment changes are likely required* -* config: v2 is now fatal-by-default. This may be overridden by setting :option:`--bootstrap-version` 2 on the CLI for a v2 bootstrap file and also enabling the runtime ``envoy.reloadable_features.enable_deprecated_v2_api`` feature. +* config: v2 is now fatal-by-default. This may be overridden by setting ``--bootstrap-version 2`` on the CLI for a v2 bootstrap file and also enabling the runtime ``envoy.reloadable_features.enable_deprecated_v2_api`` feature. 
Minor Behavior Changes ---------------------- diff --git a/docs/root/version_history/v1.17.3.rst b/docs/root/version_history/v1.17.3.rst new file mode 100644 index 0000000000000..95fff3704c8ec --- /dev/null +++ b/docs/root/version_history/v1.17.3.rst @@ -0,0 +1,25 @@ +1.17.3 (May 11, 2021) +======================= + +Incompatible Behavior Changes +----------------------------- +*Changes that are expected to cause an incompatibility if applicable; deployment changes are likely required* + +Minor Behavior Changes +---------------------- +*Changes that may cause incompatibilities for some users, but should not for most* + +Bug Fixes +--------- +*Changes expected to improve the state of the world and are unlikely to have negative effects* + +Removed Config or Runtime +------------------------- +*Normally occurs at the end of the* :ref:`deprecation period ` + +New Features +------------ +* http: added the ability to :ref:`unescape slash sequences ` in the path. Requests with unescaped slashes can be proxied, rejected or redirected to the new unescaped path. By default this feature is disabled. The default behavior can be overridden through :ref:`http_connection_manager.path_with_escaped_slashes_action ` runtime variable. This action can be selectively enabled for a portion of requests by setting the :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling ` runtime variable. 
+ +Deprecated +---------- diff --git a/docs/root/version_history/v1.17.4.rst b/docs/root/version_history/v1.17.4.rst new file mode 100644 index 0000000000000..2535eb913a57c --- /dev/null +++ b/docs/root/version_history/v1.17.4.rst @@ -0,0 +1,37 @@ +1.17.4 (Aug 24, 2021) +===================== + +Incompatible Behavior Changes +----------------------------- +*Changes that are expected to cause an incompatibility if applicable; deployment changes are likely required* + +Minor Behavior Changes +---------------------- +*Changes that may cause incompatibilities for some users, but should not for most* + +* http: reject requests with #fragment in the URI path. The fragment is not allowed to be part of request + URI according to RFC3986 (3.5), RFC7230 (5.1) and RFC 7540 (8.1.2.3). Rejection of requests can be changed + to stripping the #fragment instead by setting the runtime guard `envoy.reloadable_features.http_reject_path_with_fragment` + to false. This behavior can further be changed to the deprecated behavior of keeping the fragment by setting the runtime guard + `envoy.reloadable_features.http_strip_fragment_from_path_unsafe_if_disabled`. This runtime guard must only be set + to false when existing non-compliant traffic relies on #fragment in URI. When this option is enabled, Envoy request + authorization extensions may be bypassed. This override and its associated behavior will be decommissioned after the standard deprecation period. + +Bug Fixes +--------- +*Changes expected to improve the state of the world and are unlikely to have negative effects* + +* ext_authz: fix the ext_authz filter to correctly merge multiple same headers using the ',' as separator in the check request to the external authorization service. +* http: limit use of deferred resets in the http2 codec to server-side connections. Use of deferred reset for client connections can result in incorrect behavior and performance problems. 
+* jwt_authn: unauthorized responses now correctly include a `www-authenticate` header. + +Removed Config or Runtime +------------------------- +*Normally occurs at the end of the* :ref:`deprecation period ` + +New Features +------------ +* listener: added an option when balancing across active listeners and wildcard matching is used to return the listener that matches the IP family type associated with the listener's socket address. It is off by default, but is turned on by default in v1.19. To set change the runtime guard `envoy.reloadable_features.listener_wildcard_match_ip_family` to true. + +Deprecated +---------- diff --git a/docs/root/version_history/v1.18.3.rst b/docs/root/version_history/v1.18.3.rst new file mode 100644 index 0000000000000..509d0ebc7cf0a --- /dev/null +++ b/docs/root/version_history/v1.18.3.rst @@ -0,0 +1,30 @@ +1.18.3 (May 11, 2021) +===================== + +Incompatible Behavior Changes +----------------------------- +*Changes that are expected to cause an incompatibility if applicable; deployment changes are likely required* + +Minor Behavior Changes +---------------------- +*Changes that may cause incompatibilities for some users, but should not for most* + +Bug Fixes +--------- +*Changes expected to improve the state of the world and are unlikely to have negative effects* + +* zipkin: fix timestamp serializaiton in annotations. A prior bug fix exposed an issue with timestamps being serialized as strings. + +Removed Config or Runtime +------------------------- +*Normally occurs at the end of the* :ref:`deprecation period ` + +* tls: removed `envoy.reloadable_features.tls_use_io_handle_bio` runtime guard and legacy code path. + +New Features +------------ + +* http: added the ability to :ref:`unescape slash sequences ` in the path. Requests with unescaped slashes can be proxied, rejected or redirected to the new unescaped path. By default this feature is disabled. 
The default behavior can be overridden through :ref:`http_connection_manager.path_with_escaped_slashes_action ` runtime variable. This action can be selectively enabled for a portion of requests by setting the :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling ` runtime variable. + +Deprecated +---------- diff --git a/docs/root/version_history/v1.18.4.rst b/docs/root/version_history/v1.18.4.rst new file mode 100644 index 0000000000000..9e66511ff8070 --- /dev/null +++ b/docs/root/version_history/v1.18.4.rst @@ -0,0 +1,41 @@ +1.18.4 (Aug 24, 2021) +===================== + +Incompatible Behavior Changes +----------------------------- +*Changes that are expected to cause an incompatibility if applicable; deployment changes are likely required* + +Minor Behavior Changes +---------------------- +*Changes that may cause incompatibilities for some users, but should not for most* + +* http: disable the integration between :ref:`ExtensionWithMatcher ` + and HTTP filters by default to reflects its experimental status. This feature can be enabled by seting + `envoy.reloadable_features.experimental_matching_api` to true. +* http: reject requests with #fragment in the URI path. The fragment is not allowed to be part of request + URI according to RFC3986 (3.5), RFC7230 (5.1) and RFC 7540 (8.1.2.3). Rejection of requests can be changed + to stripping the #fragment instead by setting the runtime guard `envoy.reloadable_features.http_reject_path_with_fragment` + to false. This behavior can further be changed to the deprecated behavior of keeping the fragment by setting the runtime guard + `envoy.reloadable_features.http_strip_fragment_from_path_unsafe_if_disabled`. This runtime guard must only be set + to false when existing non-compliant traffic relies on #fragment in URI. When this option is enabled, Envoy request + authorization extensions may be bypassed. This override and its associated behavior will be decommissioned after the standard deprecation period. 
+* http: stop processing pending H/2 frames if connection transitioned to a closed state. This behavior can be temporarily reverted by setting the `envoy.reloadable_features.skip_dispatching_frames_for_closed_connection` to false. + +Bug Fixes +--------- +*Changes expected to improve the state of the world and are unlikely to have negative effects* + +* ext_authz: fix the ext_authz filter to correctly merge multiple same headers using the ',' as separator in the check request to the external authorization service. +* http: limit use of deferred resets in the http2 codec to server-side connections. Use of deferred reset for client connections can result in incorrect behavior and performance problems. +* jwt_authn: unauthorized responses now correctly include a `www-authenticate` header. + +Removed Config or Runtime +------------------------- +*Normally occurs at the end of the* :ref:`deprecation period ` + +New Features +------------ +* listener: added an option when balancing across active listeners and wildcard matching is used to return the listener that matches the IP family type associated with the listener's socket address. It is off by default, but is turned on by default in v1.19. To set change the runtime guard `envoy.reloadable_features.listener_wildcard_match_ip_family` to true. + +Deprecated +---------- diff --git a/docs/root/version_history/v1.19.0.rst b/docs/root/version_history/v1.19.0.rst index 2743d50df1015..581a535a17df6 100644 --- a/docs/root/version_history/v1.19.0.rst +++ b/docs/root/version_history/v1.19.0.rst @@ -15,7 +15,7 @@ Minor Behavior Changes * access_log: added new access_log command operator ``%REQUEST_TX_DURATION%``. * access_log: removed extra quotes on metadata string values. This behavior can be temporarily reverted by setting ``envoy.reloadable_features.unquote_log_string_values`` to false. 
-* admission control: added :ref:`max_rejection_probability ` which defaults to 80%, which means that the upper limit of the default rejection probability of the filter is changed from 100% to 80%. +* admission control: added :ref:`max_rejection_probability ` which defaults to 80%, which means that the upper limit of the default rejection probability of the filter is changed from 100% to 80%. * aws_request_signing: requests are now buffered by default to compute signatures which include the payload hash, making the filter compatible with most AWS services. Previously, requests were never buffered, which only produced correct signatures for requests without a body, or for @@ -23,10 +23,10 @@ Minor Behavior Changes be now be disabled in favor of using unsigned payloads with compatible services via the new ``use_unsigned_payload`` filter option (default false). * cache filter: serve HEAD requests from cache. -* cluster: added default value of 5 seconds for :ref:`connect_timeout `. +* cluster: added default value of 5 seconds for :ref:`connect_timeout `. * dns: changed apple resolver implementation to not reuse the UDS to the local DNS daemon. -* dns cache: the new :ref:`dns_query_timeout ` option has a default of 5s. See below for more information. -* http: disable the integration between :ref:`ExtensionWithMatcher ` +* dns cache: the new :ref:`dns_query_timeout ` option has a default of 5s. See below for more information. +* http: disable the integration between :ref:`ExtensionWithMatcher ` and HTTP filters by default to reflect its experimental status. This feature can be enabled by setting ``envoy.reloadable_features.experimental_matching_api`` to true. * http: replaced setting ``envoy.reloadable_features.strict_1xx_and_204_response_headers`` with settings @@ -38,7 +38,7 @@ Minor Behavior Changes ``envoy.reloadable_features.no_chunked_encoding_header_for_304`` to false. * http: the behavior of the ``present_match`` in route header matcher changed. 
The value of ``present_match`` was ignored in the past. The new behavior is ``present_match`` is performed when the value is true. An absent match performed when the value is false. Please reference :ref:`present_match `. -* listener: respect the :ref:`connection balance config ` +* listener: respect the :ref:`connection balance config ` defined within the listener where the sockets are redirected to. Clear that field to restore the previous behavior. * listener: when balancing across active listeners and wildcard matching is used, the behavior has been changed to return the listener that matches the IP family type associated with the listener's socket address. Any unexpected behavioral changes can be reverted by setting runtime guard ``envoy.reloadable_features.listener_wildcard_match_ip_family`` to false. * tcp: switched to the new connection pool by default. Any unexpected behavioral changes can be reverted by setting runtime guard ``envoy.reloadable_features.new_tcp_connection_pool`` to false. @@ -49,7 +49,7 @@ Bug Fixes *Changes expected to improve the state of the world and are unlikely to have negative effects* * aws_lambda: if ``payload_passthrough`` is set to ``false``, the downstream response content-type header will now be set from the content-type entry in the JSON response's headers map, if present. -* cluster: fixed the :ref:`cluster stats ` histograms by moving the accounting into the router +* cluster: fixed the :ref:`cluster stats ` histograms by moving the accounting into the router filter. This means that we now properly compute the number of bytes sent as well as handling retries which were previously ignored. * hot_restart: fix double counting of ``server.seconds_until_first_ocsp_response_expiring`` and ``server.days_until_first_cert_expiring`` during hot-restart. This stat was only incorrect until the parent process terminated. * http: fix erroneous handling of invalid nghttp2 frames with the ``NGHTTP2_ERR_REFUSED_STREAM`` error. 
Prior to the fix, @@ -65,15 +65,15 @@ Bug Fixes Removed Config or Runtime ------------------------- -*Normally occurs at the end of the* :ref:`deprecation period ` +*Normally occurs at the end of the* :ref:`deprecation period ` * event: removed ``envoy.reloadable_features.activate_timers_next_event_loop`` runtime guard and legacy code path. * gzip: removed legacy HTTP Gzip filter and runtime guard ``envoy.deprecated_features.allow_deprecated_gzip_http_filter``. * http: removed ``envoy.reloadable_features.allow_500_after_100`` runtime guard and the legacy code path. * http: removed ``envoy.reloadable_features.always_apply_route_header_rules`` runtime guard and legacy code path. -* http: removed ``envoy.reloadable_features.hcm_stream_error_on_invalid_message`` for disabling closing HTTP/1.1 connections on error. Connection-closing can still be disabled by setting the HTTP/1 configuration :ref:`override_stream_error_on_invalid_http_message `. +* http: removed ``envoy.reloadable_features.hcm_stream_error_on_invalid_message`` for disabling closing HTTP/1.1 connections on error. Connection-closing can still be disabled by setting the HTTP/1 configuration :ref:`override_stream_error_on_invalid_http_message `. * http: removed ``envoy.reloadable_features.http_set_copy_replace_all_headers`` runtime guard and legacy code paths. -* http: removed ``envoy.reloadable_features.overload_manager_disable_keepalive_drain_http2``; Envoy will now always send GOAWAY to HTTP2 downstreams when the :ref:`disable_keepalive ` overload action is active. +* http: removed ``envoy.reloadable_features.overload_manager_disable_keepalive_drain_http2``; Envoy will now always send GOAWAY to HTTP2 downstreams when the :ref:`disable_keepalive ` overload action is active. * http: removed ``envoy.reloadable_features.http_match_on_all_headers`` runtime guard and legacy code paths. * http: removed ``envoy.reloadable_features.unify_grpc_handling`` runtime guard and legacy code paths. 
* tls: removed ``envoy.reloadable_features.tls_use_io_handle_bio`` runtime guard and legacy code path. @@ -81,62 +81,62 @@ Removed Config or Runtime New Features ------------ -* access_log: added the new response flag for :ref:`overload manager termination `. The response flag will be set when the http stream is terminated by overload manager. -* admission control: added :ref:`rps_threshold ` option that when average RPS of the sampling window is below this threshold, the filter will not throttle requests. Added :ref:`max_rejection_probability ` option to set an upper limit on the probability of rejection. -* bandwidth_limit: added new :ref:`HTTP bandwidth limit filter `. -* bootstrap: added :ref:`dns_resolution_config ` to aggregate all of the DNS resolver configuration in a single message. By setting ``no_default_search_domain`` to true the DNS resolver will not use the default search domains. By setting the ``resolvers`` the external DNS servers to be used for external DNS queries can be specified. -* cluster: added :ref:`dns_resolution_config ` to aggregate all of the DNS resolver configuration in a single message. By setting ``no_default_search_domain`` to true the DNS resolver will not use the default search domains. -* cluster: added :ref:`host_rewrite_literal ` to WeightedCluster. -* cluster: added :ref:`wait_for_warm_on_init `, which allows cluster readiness to not block on cluster warm-up. It is true by default, which preserves existing behavior. Currently, only applicable for DNS-based clusters. +* access_log: added the new response flag for :ref:`overload manager termination `. The response flag will be set when the http stream is terminated by overload manager. +* admission control: added :ref:`rps_threshold ` option that when average RPS of the sampling window is below this threshold, the filter will not throttle requests. Added :ref:`max_rejection_probability ` option to set an upper limit on the probability of rejection. 
+* bandwidth_limit: added new :ref:`HTTP bandwidth limit filter `. +* bootstrap: added :ref:`dns_resolution_config ` to aggregate all of the DNS resolver configuration in a single message. By setting ``no_default_search_domain`` to true the DNS resolver will not use the default search domains. By setting the ``resolvers`` the external DNS servers to be used for external DNS queries can be specified. +* cluster: added :ref:`dns_resolution_config ` to aggregate all of the DNS resolver configuration in a single message. By setting ``no_default_search_domain`` to true the DNS resolver will not use the default search domains. +* cluster: added :ref:`host_rewrite_literal ` to WeightedCluster. +* cluster: added :ref:`wait_for_warm_on_init `, which allows cluster readiness to not block on cluster warm-up. It is true by default, which preserves existing behavior. Currently, only applicable for DNS-based clusters. * composite filter: can now be used with filters that also add an access logger, such as the WASM filter. -* config: added stat :ref:`config_reload_time_ms `. -* connection_limit: added new :ref:`Network connection limit filter `. +* config: added stat :ref:`config_reload_time_ms `. +* connection_limit: added new :ref:`Network connection limit filter `. * crash support: restore crash context when continuing to processing requests or responses as a result of an asynchronous callback that invokes a filter directly. This is unlike the call stacks that go through the various network layers, to eventually reach the filter. For a concrete example see: ``Envoy::Extensions::HttpFilters::Cache::CacheFilter::getHeaders`` which posts a callback on the dispatcher that will invoke the filter directly. -* dns cache: added :ref:`preresolve_hostnames ` option to the DNS cache config. This option allows hostnames to be preresolved into the cache upon cache creation. 
This might provide performance improvement, in the form of cache hits, for hostnames that are going to be resolved during steady state and are known at config load time. -* dns cache: added :ref:`dns_query_timeout ` option to the DNS cache config. This option allows explicitly controlling the timeout of underlying queries independently of the underlying DNS platform implementation. Coupled with success and failure retry policies the use of this timeout will lead to more deterministic DNS resolution times. -* dns resolver: added ``DnsResolverOptions`` protobuf message to reconcile all of the DNS lookup option flags. By setting the configuration option :ref:`use_tcp_for_dns_lookups ` as true we can make the underlying dns resolver library to make only TCP queries to the DNS servers and by setting the configuration option :ref:`no_default_search_domain ` as true the DNS resolver library will not use the default search domains. -* dns resolver: added ``DnsResolutionConfig`` to combine :ref:`dns_resolver_options ` and :ref:`resolvers ` in a single protobuf message. The field ``resolvers`` can be specified with a list of DNS resolver addresses. If specified, DNS client library will perform resolution via the underlying DNS resolvers. Otherwise, the default system resolvers (e.g., /etc/resolv.conf) will be used. -* dns_filter: added :ref:`dns_resolution_config ` to aggregate all of the DNS resolver configuration in a single message. By setting the configuration option ``use_tcp_for_dns_lookups`` to true we can make dns filter's external resolvers to answer queries using TCP only, by setting the configuration option ``no_default_search_domain`` as true the DNS resolver will not use the default search domains. And by setting the configuration ``resolvers`` we can specify the external DNS servers to be used for external DNS query which replaces the pre-existing alpha api field ``upstream_resolvers``. 
-* dynamic_forward_proxy: added :ref:`dns_resolution_config ` option to the DNS cache config in order to aggregate all of the DNS resolver configuration in a single message. By setting one such configuration option ``no_default_search_domain`` as true the DNS resolver will not use the default search domains. And by setting the configuration ``resolvers`` we can specify the external DNS servers to be used for external DNS query instead of the system default resolvers. -* ext_authz_filter: added :ref:`bootstrap_metadata_labels_key ` option to configure labels of destination service. +* dns cache: added :ref:`preresolve_hostnames ` option to the DNS cache config. This option allows hostnames to be preresolved into the cache upon cache creation. This might provide performance improvement, in the form of cache hits, for hostnames that are going to be resolved during steady state and are known at config load time. +* dns cache: added :ref:`dns_query_timeout ` option to the DNS cache config. This option allows explicitly controlling the timeout of underlying queries independently of the underlying DNS platform implementation. Coupled with success and failure retry policies the use of this timeout will lead to more deterministic DNS resolution times. +* dns resolver: added ``DnsResolverOptions`` protobuf message to reconcile all of the DNS lookup option flags. By setting the configuration option :ref:`use_tcp_for_dns_lookups ` as true we can make the underlying dns resolver library to make only TCP queries to the DNS servers and by setting the configuration option :ref:`no_default_search_domain ` as true the DNS resolver library will not use the default search domains. +* dns resolver: added ``DnsResolutionConfig`` to combine :ref:`dns_resolver_options ` and :ref:`resolvers ` in a single protobuf message. The field ``resolvers`` can be specified with a list of DNS resolver addresses. If specified, DNS client library will perform resolution via the underlying DNS resolvers. 
Otherwise, the default system resolvers (e.g., /etc/resolv.conf) will be used. +* dns_filter: added :ref:`dns_resolution_config ` to aggregate all of the DNS resolver configuration in a single message. By setting the configuration option ``use_tcp_for_dns_lookups`` to true we can make dns filter's external resolvers to answer queries using TCP only, by setting the configuration option ``no_default_search_domain`` as true the DNS resolver will not use the default search domains. And by setting the configuration ``resolvers`` we can specify the external DNS servers to be used for external DNS query which replaces the pre-existing alpha api field ``upstream_resolvers``. +* dynamic_forward_proxy: added :ref:`dns_resolution_config ` option to the DNS cache config in order to aggregate all of the DNS resolver configuration in a single message. By setting one such configuration option ``no_default_search_domain`` as true the DNS resolver will not use the default search domains. And by setting the configuration ``resolvers`` we can specify the external DNS servers to be used for external DNS query instead of the system default resolvers. +* ext_authz_filter: added :ref:`bootstrap_metadata_labels_key ` option to configure labels of destination service. * http: added new field ``is_optional`` to ``extensions.filters.network.http_connection_manager.v3.HttpFilter``. When set to ``true``, unsupported http filters will be ignored by envoy. This is also same with unsupported http filter in the typed per filter config. For more information, please reference - :ref:`HttpFilter `. -* http: added :ref:`scheme options ` for adding or overwriting scheme. -* http: added :ref:`stripping trailing host dot from host header ` support. -* http: added support for :ref:`original IP detection extensions `. - Two initial extensions were added, the :ref:`custom header ` extension and the - :ref:`xff ` extension. 
-* http: added a new option to upstream HTTP/2 :ref:`keepalive ` to send a PING ahead of a new stream if the connection has been idle for a sufficient duration. -* http: added the ability to :ref:`unescape slash sequences ` in the path. Requests with unescaped slashes can be proxied, rejected or redirected to the new unescaped path. By default this feature is disabled. The default behavior can be overridden through :ref:`http_connection_manager.path_with_escaped_slashes_action` runtime variable. This action can be selectively enabled for a portion of requests by setting the :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling` runtime variable. -* http: added upstream and downstream alpha HTTP/3 support! See :ref:`quic_options ` for downstream and the new http3_protocol_options in :ref:`http_protocol_options ` for upstream HTTP/3. + :ref:`HttpFilter `. +* http: added :ref:`scheme options ` for adding or overwriting scheme. +* http: added :ref:`stripping trailing host dot from host header ` support. +* http: added support for :ref:`original IP detection extensions `. + Two initial extensions were added, the :ref:`custom header ` extension and the + :ref:`xff ` extension. +* http: added a new option to upstream HTTP/2 :ref:`keepalive ` to send a PING ahead of a new stream if the connection has been idle for a sufficient duration. +* http: added the ability to :ref:`unescape slash sequences ` in the path. Requests with unescaped slashes can be proxied, rejected or redirected to the new unescaped path. By default this feature is disabled. The default behavior can be overridden through :ref:`http_connection_manager.path_with_escaped_slashes_action` runtime variable. This action can be selectively enabled for a portion of requests by setting the :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling` runtime variable. +* http: added upstream and downstream alpha HTTP/3 support! 
See :ref:`quic_options ` for downstream and the new http3_protocol_options in :ref:`http_protocol_options ` for upstream HTTP/3. * http: raise max configurable max_request_headers_kb limit to 8192 KiB (8MiB) from 96 KiB in http connection manager. -* input matcher: added a new input matcher that :ref:`matches an IP address against a list of CIDR ranges `. -* jwt_authn: added support to fetch remote jwks asynchronously specified by :ref:`async_fetch `. -* jwt_authn: added support to add padding in the forwarded JWT payload specified by :ref:`pad_forward_payload_header `. +* input matcher: added a new input matcher that :ref:`matches an IP address against a list of CIDR ranges `. +* jwt_authn: added support to fetch remote jwks asynchronously specified by :ref:`async_fetch `. +* jwt_authn: added support to add padding in the forwarded JWT payload specified by :ref:`pad_forward_payload_header `. * listener: added ability to change an existing listener's address. -* listener: added filter chain match support for :ref:`direct source address `. -* local_rate_limit_filter: added suppoort for locally rate limiting http requests on a per connection basis. This can be enabled by setting the :ref:`local_rate_limit_per_downstream_connection ` field to true. -* metric service: added support for sending metric tags as labels. This can be enabled by setting the :ref:`emit_tags_as_labels ` field to true. -* proxy protocol: added support for generating the header while using the :ref:`HTTP connection manager `. This is done using the :ref:`Proxy Protocol Transport Socket ` on upstream clusters. +* listener: added filter chain match support for :ref:`direct source address `. +* local_rate_limit_filter: added suppoort for locally rate limiting http requests on a per connection basis. This can be enabled by setting the :ref:`local_rate_limit_per_downstream_connection ` field to true. +* metric service: added support for sending metric tags as labels. 
This can be enabled by setting the :ref:`emit_tags_as_labels ` field to true. +* proxy protocol: added support for generating the header while using the :ref:`HTTP connection manager `. This is done using the :ref:`Proxy Protocol Transport Socket ` on upstream clusters. This feature is currently affected by a memory leak `issue `_. * req_without_query: added access log formatter extension implementing command operator :ref:`REQ_WITHOUT_QUERY ` to log the request path, while excluding the query string. * router: added option ``suppress_grpc_request_failure_code_stats`` to :ref:`the router ` to allow users to exclude incrementing HTTP status code stats on gRPC requests. -* stats: added native :ref:`Graphite-formatted tag ` support. -* tcp: added support for :ref:`preconnecting `. Preconnecting is off by default, but recommended for clusters serving latency-sensitive traffic. -* thrift_proxy: added per upstream metrics within the :ref:`thrift router ` for request and response size histograms. -* thrift_proxy: added support for :ref:`outlier detection `. +* stats: added native :ref:`Graphite-formatted tag ` support. +* tcp: added support for :ref:`preconnecting `. Preconnecting is off by default, but recommended for clusters serving latency-sensitive traffic. +* thrift_proxy: added per upstream metrics within the :ref:`thrift router ` for request and response size histograms. +* thrift_proxy: added support for :ref:`outlier detection `. * tls: allow dual ECDSA/RSA certs via SDS. Previously, SDS only supported a single certificate per context, and dual cert was only supported via non-SDS. -* tracing: add option :ref:`use_request_id_for_trace_sampling ` which allows configuring whether to perform sampling based on :ref:`x-request-id` or not. -* udp_proxy: added :ref:`key ` as another hash policy to support hash based routing on any given key. 
+* tracing: add option :ref:`use_request_id_for_trace_sampling ` which allows configuring whether to perform sampling based on :ref:`x-request-id` or not. +* udp_proxy: added :ref:`key ` as another hash policy to support hash based routing on any given key. * windows container image: added user, EnvoyUser which is part of the Network Configuration Operators group to the container image. Deprecated ---------- -* bootstrap: the field :ref:`use_tcp_for_dns_lookups ` is deprecated in favor of :ref:`dns_resolution_config ` which aggregates all of the DNS resolver configuration in a single message. -* cluster: the fields :ref:`use_tcp_for_dns_lookups ` and :ref:`dns_resolvers ` are deprecated in favor of :ref:`dns_resolution_config ` which aggregates all of the DNS resolver configuration in a single message. -* dns_filter: the field :ref:`known_suffixes ` is deprecated. The internal data management of the filter has changed and the filter no longer uses the known_suffixes field. -* dynamic_forward_proxy: the field :ref:`use_tcp_for_dns_lookups ` is deprecated in favor of :ref:`dns_resolution_config ` which aggregates all of the DNS resolver configuration in a single message. -* http: :ref:`xff_num_trusted_hops ` is deprecated in favor of :ref:`original IP detection extensions`. +* bootstrap: the field :ref:`use_tcp_for_dns_lookups ` is deprecated in favor of :ref:`dns_resolution_config ` which aggregates all of the DNS resolver configuration in a single message. +* cluster: the fields :ref:`use_tcp_for_dns_lookups ` and :ref:`dns_resolvers ` are deprecated in favor of :ref:`dns_resolution_config ` which aggregates all of the DNS resolver configuration in a single message. +* dns_filter: the field :ref:`known_suffixes ` is deprecated. The internal data management of the filter has changed and the filter no longer uses the known_suffixes field. 
+* dynamic_forward_proxy: the field :ref:`use_tcp_for_dns_lookups ` is deprecated in favor of :ref:`dns_resolution_config ` which aggregates all of the DNS resolver configuration in a single message. +* http: :ref:`xff_num_trusted_hops ` is deprecated in favor of :ref:`original IP detection extensions`. diff --git a/docs/root/version_history/v1.19.1.rst b/docs/root/version_history/v1.19.1.rst new file mode 100644 index 0000000000000..40e19eeba5755 --- /dev/null +++ b/docs/root/version_history/v1.19.1.rst @@ -0,0 +1,33 @@ +1.19.1 (Aug 24, 2021) +===================== + +Incompatible Behavior Changes +----------------------------- + +Minor Behavior Changes +---------------------- +*Changes that may cause incompatibilities for some users, but should not for most* + +* http: reject requests with #fragment in the URI path. The fragment is not allowed to be part of request + URI according to RFC3986 (3.5), RFC7230 (5.1) and RFC 7540 (8.1.2.3). Rejection of requests can be changed + to stripping the #fragment instead by setting the runtime guard ``envoy.reloadable_features.http_reject_path_with_fragment`` + to false. This behavior can further be changed to the deprecated behavior of keeping the fragment by setting the runtime guard + ``envoy.reloadable_features.http_strip_fragment_from_path_unsafe_if_disabled``. This runtime guard must only be set + to false when existing non-compliant traffic relies on #fragment in URI. When this option is enabled, Envoy request + authorization extensions may be bypassed. This override and its associated behavior will be decommissioned after the standard deprecation period. +* http: stop processing pending H/2 frames if connection transitioned to the closed state. This behavior can be temporarily reverted by setting the ``envoy.reloadable_features.skip_dispatching_frames_for_closed_connection`` to false. 
+ +Bug Fixes +--------- + +* ext_authz: fix the ext_authz filter to correctly merge multiple same headers using the ',' as separator in the check request to the external authorization service. +* http: limit use of deferred resets in the http2 codec to server-side connections. Use of deferred reset for client connections can result in incorrect behavior and performance problems. + +Removed Config or Runtime +------------------------- + +New Features +------------ + +Deprecated +---------- diff --git a/docs/root/version_history/version_history.rst b/docs/root/version_history/version_history.rst index 7a203a956b58c..371bc2c398781 100644 --- a/docs/root/version_history/version_history.rst +++ b/docs/root/version_history/version_history.rst @@ -7,17 +7,25 @@ Version history :titlesonly: current + v1.19.1 v1.19.0 + v1.18.4 + v1.18.3 v1.18.2 v1.18.1 v1.18.0 + v1.17.4 + v1.17.3 v1.17.2 v1.17.1 v1.17.0 + v1.16.5 + v1.16.4 v1.16.3 v1.16.2 v1.16.1 v1.16.0 + v1.15.5 v1.15.4 v1.15.3 v1.15.2 diff --git a/envoy/api/BUILD b/envoy/api/BUILD index 55e267505ee54..904e5fff75f8a 100644 --- a/envoy/api/BUILD +++ b/envoy/api/BUILD @@ -18,6 +18,7 @@ envoy_cc_library( "//envoy/filesystem:filesystem_interface", "//envoy/server:process_context_interface", "//envoy/thread:thread_interface", + "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", ], ) diff --git a/envoy/api/api.h b/envoy/api/api.h index 89336aaf57347..c83198beed8be 100644 --- a/envoy/api/api.h +++ b/envoy/api/api.h @@ -5,6 +5,7 @@ #include "envoy/common/random_generator.h" #include "envoy/common/time.h" +#include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/event/dispatcher.h" #include "envoy/event/scaled_range_timer_manager.h" #include "envoy/filesystem/filesystem.h" @@ -83,6 +84,11 @@ class Api { * @return an optional reference to the ProcessContext */ virtual ProcessContextOptRef processContext() PURE; + + /** + * @return the bootstrap Envoy started with. 
+ */ + virtual const envoy::config::bootstrap::v3::Bootstrap& bootstrap() const PURE; }; using ApiPtr = std::unique_ptr; diff --git a/envoy/api/io_error.h b/envoy/api/io_error.h index ebb4f99b79854..f5de759194d18 100644 --- a/envoy/api/io_error.h +++ b/envoy/api/io_error.h @@ -42,6 +42,7 @@ class IoError { virtual IoErrorCode getErrorCode() const PURE; virtual std::string getErrorDetails() const PURE; + virtual int getSystemErrorCode() const PURE; }; using IoErrorDeleterType = void (*)(IoError*); diff --git a/envoy/buffer/BUILD b/envoy/buffer/BUILD index 3b9157d06f5fd..24a2e516527cd 100644 --- a/envoy/buffer/BUILD +++ b/envoy/buffer/BUILD @@ -16,6 +16,7 @@ envoy_cc_library( ], deps = [ "//envoy/api:os_sys_calls_interface", + "//envoy/http:stream_reset_handler_interface", "//source/common/common:assert_lib", "//source/common/common:byte_order_lib", "//source/common/common:utility_lib", diff --git a/envoy/buffer/buffer.h b/envoy/buffer/buffer.h index c30cbd84f2ca4..73dada2f25c3f 100644 --- a/envoy/buffer/buffer.h +++ b/envoy/buffer/buffer.h @@ -9,6 +9,7 @@ #include "envoy/common/exception.h" #include "envoy/common/platform.h" #include "envoy/common/pure.h" +#include "envoy/http/stream_reset_handler.h" #include "source/common/common/assert.h" #include "source/common/common/byte_order.h" @@ -109,6 +110,19 @@ class BufferMemoryAccount { * @param amount the amount to credit. */ virtual void credit(uint64_t amount) PURE; + + /** + * Clears the associated downstream with this account. + * After this has been called, calls to reset the downstream become no-ops. + * Must be called before downstream is deleted. + */ + virtual void clearDownstream() PURE; + + /** + * Reset the downstream stream associated with this account. Resetting the downstream stream + * should trigger a reset of the corresponding upstream stream if it exists. 
+ */ + virtual void resetDownstream() PURE; }; using BufferMemoryAccountSharedPtr = std::shared_ptr; @@ -480,7 +494,8 @@ class Instance { using InstancePtr = std::unique_ptr; /** - * A factory for creating buffers which call callbacks when reaching high and low watermarks. + * An abstract factory for creating watermarked buffers and buffer memory + * accounts. The factory also supports tracking active memory accounts. */ class WatermarkFactory { public: @@ -497,6 +512,26 @@ class WatermarkFactory { virtual InstancePtr createBuffer(std::function below_low_watermark, std::function above_high_watermark, std::function above_overflow_watermark) PURE; + + /** + * Create and returns a buffer memory account. + * + * @param reset_handler supplies the stream_reset_handler the account will + * invoke to reset the stream. + * @return a BufferMemoryAccountSharedPtr of the newly created account or + * nullptr if tracking is disabled. + */ + virtual BufferMemoryAccountSharedPtr createAccount(Http::StreamResetHandler& reset_handler) PURE; + + /** + * Goes through the tracked accounts, resetting the accounts and their + * corresponding stream depending on the pressure. + * + * @param pressure scaled threshold pressure used to compute the buckets to + * reset internally. 
+ * @return the number of streams reset + */ + virtual uint64_t resetAccountsGivenPressure(float pressure) PURE; }; using WatermarkFactoryPtr = std::unique_ptr; diff --git a/envoy/common/BUILD b/envoy/common/BUILD index d7e0130cf1f13..dd12783097de0 100644 --- a/envoy/common/BUILD +++ b/envoy/common/BUILD @@ -71,6 +71,15 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "key_value_store_interface", + hdrs = ["key_value_store.h"], + deps = [ + "//envoy/protobuf:message_validator_interface", + "//envoy/registry", + ], +) + envoy_cc_library( name = "interval_set_interface", hdrs = ["interval_set.h"], diff --git a/envoy/common/key_value_store.h b/envoy/common/key_value_store.h new file mode 100644 index 0000000000000..3d3fb899fd940 --- /dev/null +++ b/envoy/common/key_value_store.h @@ -0,0 +1,85 @@ +#pragma once + +#include "envoy/common/pure.h" +#include "envoy/config/typed_config.h" +#include "envoy/event/dispatcher.h" +#include "envoy/filesystem/filesystem.h" +#include "envoy/protobuf/message_validator.h" + +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" + +namespace Envoy { + +// A key value store, designed to periodically flush key value pairs to long +// term storage (disk or otherwise) +class KeyValueStore { +public: + virtual ~KeyValueStore() = default; + + /** + * Adds or updates a key:value pair in the store. + * @param key supplies a key to add or update. + * @param value supplies the value to set for that key. + */ + virtual void addOrUpdate(absl::string_view key, absl::string_view value) PURE; + + /** + * Removes a key:value pair from the store. This is a no-op if the key is not present. + * @param key supplies a key to remove from the store. + */ + virtual void remove(absl::string_view key) PURE; + + /** + * Returns the value of the key provided. + * @param key supplies a key to return the value of. + * @return the value, if the key is in the store, absl::nullopt otherwise. 
+ */ + virtual absl::optional get(absl::string_view key) PURE; + + /** + * Flushes the store to long term storage. + */ + virtual void flush() PURE; + + // Returns for the iterate function. + enum class Iterate { Continue, Break }; + + /** + * Callback when calling iterate() in a key value store. + * @param key is the key for a given entry + * @param value is the value for a given entry + * @return Iterate::Continue to continue iteration, or Iterate::Break to stop. + */ + using ConstIterateCb = std::function; + + /** + * Iterate over a key value store. + * @param cb supplies the iteration callback. + */ + virtual void iterate(ConstIterateCb cb) const PURE; +}; + +using KeyValueStorePtr = std::unique_ptr; + +// A factory for creating key value stores. +class KeyValueStoreFactory : public Envoy::Config::TypedFactory { +public: + /** + * Function to create KeyValueStores from the specified config. + * @param cb supplies the key value store configuration + * @param validation_visitor the configuration validator + * @dispatcher the dispatcher for the thread, for flush alarms. + * @file_system the file system. + * @return a new key value store. + */ + virtual KeyValueStorePtr createStore(const Protobuf::Message& config, + ProtobufMessage::ValidationVisitor& validation_visitor, + Event::Dispatcher& dispatcher, + Filesystem::Instance& file_system) PURE; + + // @brief the category of the key value store for factory registration. 
+ std::string category() const override { return "envoy.common.key_value"; } +}; + +} // namespace Envoy diff --git a/envoy/config/BUILD b/envoy/config/BUILD index cb8b6a9ce6cf1..4b3c37ef97148 100644 --- a/envoy/config/BUILD +++ b/envoy/config/BUILD @@ -70,7 +70,6 @@ envoy_cc_library( hdrs = ["subscription.h"], deps = [ "//envoy/stats:stats_macros", - "//source/common/config:api_type_oracle_lib", "//source/common/protobuf", "@envoy_api//envoy/service/discovery/v3:pkg_cc_proto", ], diff --git a/envoy/config/extension_config_provider.h b/envoy/config/extension_config_provider.h index 6524f10270881..ce84225eb6236 100644 --- a/envoy/config/extension_config_provider.h +++ b/envoy/config/extension_config_provider.h @@ -35,9 +35,10 @@ template class ExtensionConfigProvider { virtual absl::optional config() PURE; }; -template -class DynamicExtensionConfigProvider : public ExtensionConfigProvider { +template class DynamicExtensionConfigProviderBase { public: + virtual ~DynamicExtensionConfigProviderBase() = default; + /** * Update the provider with a new configuration. * @param config is an extension factory callback to replace the existing configuration. 
@@ -59,5 +60,9 @@ class DynamicExtensionConfigProvider : public ExtensionConfigProvider +class DynamicExtensionConfigProvider : public DynamicExtensionConfigProviderBase, + public ExtensionConfigProvider {}; + } // namespace Config } // namespace Envoy diff --git a/envoy/config/grpc_mux.h b/envoy/config/grpc_mux.h index 0139cb3d95243..f3c5d7c00808a 100644 --- a/envoy/config/grpc_mux.h +++ b/envoy/config/grpc_mux.h @@ -105,6 +105,9 @@ class GrpcMux { virtual void requestOnDemandUpdate(const std::string& type_url, const absl::flat_hash_set& for_update) PURE; + + // TODO (dmitri-d) remove this when legacy muxes have been removed + virtual bool isUnified() const { return false; } }; using GrpcMuxPtr = std::unique_ptr; diff --git a/envoy/filter/http/BUILD b/envoy/filter/BUILD similarity index 82% rename from envoy/filter/http/BUILD rename to envoy/filter/BUILD index 887967fd5e8c4..02e267213684f 100644 --- a/envoy/filter/http/BUILD +++ b/envoy/filter/BUILD @@ -9,8 +9,8 @@ licenses(["notice"]) # Apache 2 envoy_package() envoy_cc_library( - name = "filter_config_provider_interface", - hdrs = ["filter_config_provider.h"], + name = "config_provider_manager_interface", + hdrs = ["config_provider_manager.h"], deps = [ "//envoy/config:extension_config_provider_interface", "//envoy/http:filter_interface", diff --git a/envoy/filter/http/filter_config_provider.h b/envoy/filter/config_provider_manager.h similarity index 98% rename from envoy/filter/http/filter_config_provider.h rename to envoy/filter/config_provider_manager.h index 097c1616099ca..824c36e73dea5 100644 --- a/envoy/filter/http/filter_config_provider.h +++ b/envoy/filter/config_provider_manager.h @@ -10,7 +10,6 @@ namespace Envoy { namespace Filter { -namespace Http { using FilterConfigProvider = Envoy::Config::ExtensionConfigProvider; diff --git a/envoy/http/BUILD b/envoy/http/BUILD index 09d26b373967a..ee9e63a01d6c2 100644 --- a/envoy/http/BUILD +++ b/envoy/http/BUILD @@ -43,6 +43,7 @@ envoy_cc_library( 
":header_map_interface", ":metadata_interface", ":protocol_interface", + ":stream_reset_handler_interface", "//envoy/buffer:buffer_interface", "//envoy/grpc:status", "//envoy/network:address_interface", @@ -51,6 +52,11 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "stream_reset_handler_interface", + hdrs = ["stream_reset_handler.h"], +) + envoy_cc_library( name = "codes_interface", hdrs = ["codes.h"], diff --git a/envoy/http/alternate_protocols_cache.h b/envoy/http/alternate_protocols_cache.h index 2d02cda9bd48e..5dbbff8c29096 100644 --- a/envoy/http/alternate_protocols_cache.h +++ b/envoy/http/alternate_protocols_cache.h @@ -17,7 +17,7 @@ namespace Http { /** * Tracks alternate protocols that can be used to make an HTTP connection to an origin server. - * See https://tools.ietf.org/html/rfc7838 for HTTP Alternate Services and + * See https://tools.ietf.org/html/rfc7838 for HTTP Alternative Services and * https://datatracker.ietf.org/doc/html/draft-ietf-dnsop-svcb-https-04 for the * "HTTPS" DNS resource record. */ diff --git a/envoy/http/codec.h b/envoy/http/codec.h index 3674fd88c3123..023b6a129bed3 100644 --- a/envoy/http/codec.h +++ b/envoy/http/codec.h @@ -11,6 +11,7 @@ #include "envoy/http/header_map.h" #include "envoy/http/metadata_interface.h" #include "envoy/http/protocol.h" +#include "envoy/http/stream_reset_handler.h" #include "envoy/network/address.h" #include "envoy/stream_info/stream_info.h" @@ -263,32 +264,6 @@ class ResponseDecoder : public virtual StreamDecoder { virtual void dumpState(std::ostream& os, int indent_level = 0) const PURE; }; -/** - * Stream reset reasons. - */ -enum class StreamResetReason { - // If a local codec level reset was sent on the stream. - LocalReset, - // If a local codec level refused stream reset was sent on the stream (allowing for retry). - LocalRefusedStreamReset, - // If a remote codec level reset was received on the stream. 
- RemoteReset, - // If a remote codec level refused stream reset was received on the stream (allowing for retry). - RemoteRefusedStreamReset, - // If the stream was locally reset by a connection pool due to an initial connection failure. - ConnectionFailure, - // If the stream was locally reset due to connection termination. - ConnectionTermination, - // The stream was reset because of a resource overflow. - Overflow, - // Either there was an early TCP error for a CONNECT request or the peer reset with CONNECT_ERROR - ConnectError, - // Received payload did not conform to HTTP protocol. - ProtocolError, - // If the stream was locally reset by the Overload Manager. - OverloadManager -}; - /** * Callbacks that fire against a stream. */ @@ -319,10 +294,8 @@ class StreamCallbacks { /** * An HTTP stream (request, response, and push). */ -class Stream { +class Stream : public StreamResetHandler { public: - virtual ~Stream() = default; - /** * Add stream callbacks. * @param callbacks supplies the callbacks to fire on stream events. @@ -335,12 +308,6 @@ class Stream { */ virtual void removeCallbacks(StreamCallbacks& callbacks) PURE; - /** - * Reset the stream. No events will fire beyond this point. - * @param reason supplies the reset reason. - */ - virtual void resetStream(StreamResetReason reason) PURE; - /** * Enable/disable further data from this stream. * Cessation of data may not be immediate. 
For example, for HTTP/2 this may stop further flow diff --git a/envoy/http/header_map.h b/envoy/http/header_map.h index a3f1fd855b417..d50e173fecbb1 100644 --- a/envoy/http/header_map.h +++ b/envoy/http/header_map.h @@ -318,7 +318,8 @@ class HeaderEntry { HEADER_FUNC(EnvoyExpectedRequestTimeoutMs) \ HEADER_FUNC(EnvoyMaxRetries) \ HEADER_FUNC(EnvoyUpstreamRequestTimeoutMs) \ - HEADER_FUNC(EnvoyUpstreamRequestPerTryTimeoutMs) + HEADER_FUNC(EnvoyUpstreamRequestPerTryTimeoutMs) \ + HEADER_FUNC(EnvoyUpstreamStreamDurationMs) #define INLINE_REQ_HEADERS(HEADER_FUNC) \ INLINE_REQ_STRING_HEADERS(HEADER_FUNC) \ diff --git a/envoy/http/stream_reset_handler.h b/envoy/http/stream_reset_handler.h new file mode 100644 index 0000000000000..7a6d23c5dac09 --- /dev/null +++ b/envoy/http/stream_reset_handler.h @@ -0,0 +1,50 @@ +#pragma once + +#include "envoy/common/pure.h" + +// Stream Reset is refactored from the codec to avoid cyclical dependencies with +// the BufferMemoryAccount interface. +namespace Envoy { +namespace Http { + +/** + * Stream reset reasons. + */ +enum class StreamResetReason { + // If a local codec level reset was sent on the stream. + LocalReset, + // If a local codec level refused stream reset was sent on the stream (allowing for retry). + LocalRefusedStreamReset, + // If a remote codec level reset was received on the stream. + RemoteReset, + // If a remote codec level refused stream reset was received on the stream (allowing for retry). + RemoteRefusedStreamReset, + // If the stream was locally reset by a connection pool due to an initial connection failure. + ConnectionFailure, + // If the stream was locally reset due to connection termination. + ConnectionTermination, + // The stream was reset because of a resource overflow. + Overflow, + // Either there was an early TCP error for a CONNECT request or the peer reset with CONNECT_ERROR + ConnectError, + // Received payload did not conform to HTTP protocol. 
+ ProtocolError, + // If the stream was locally reset by the Overload Manager. + OverloadManager +}; + +/** + * Handler to reset an underlying HTTP stream. + */ +class StreamResetHandler { +public: + virtual ~StreamResetHandler() = default; + /** + * Reset the stream. No events will fire beyond this point. + * @param reason supplies the reset reason. + */ + virtual void resetStream(StreamResetReason reason) PURE; +}; + +} // namespace Http +} // namespace Envoy diff --git a/envoy/network/BUILD b/envoy/network/BUILD index a76a4a4dfe544..3caab27a2aaa9 100644 --- a/envoy/network/BUILD +++ b/envoy/network/BUILD @@ -116,6 +116,7 @@ envoy_cc_library( deps = [ ":address_interface", ":io_handle_interface", + "//envoy/ssl:connection_interface", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) diff --git a/envoy/network/connection.h b/envoy/network/connection.h index 620a5b2e1ec31..f5d23938e9f90 100644 --- a/envoy/network/connection.h +++ b/envoy/network/connection.h @@ -192,10 +192,10 @@ class Connection : public Event::DeferredDeletable, virtual bool readEnabled() const PURE; /** - * @return the address provider backing this connection. + * @return the connection info provider backing this connection. */ - virtual const SocketAddressProvider& addressProvider() const PURE; - virtual SocketAddressProviderSharedPtr addressProviderSharedPtr() const PURE; + virtual const ConnectionInfoProvider& connectionInfoProvider() const PURE; + virtual ConnectionInfoProviderSharedPtr connectionInfoProviderSharedPtr() const PURE; /** * Credentials of the peer of a socket as decided by SO_PEERCRED. @@ -339,8 +339,11 @@ class ServerConnection : public virtual Connection { * Set the amount of time allowed for the transport socket to report that a connection is * established. The provided timeout is relative to the current time. If this method is called * after a connection has already been established, it is a no-op. + * + * If a timeout occurs, `timeout_stat` will be incremented. 
*/ - virtual void setTransportSocketConnectTimeout(std::chrono::milliseconds timeout) PURE; + virtual void setTransportSocketConnectTimeout(std::chrono::milliseconds timeout, + Stats::Counter& timeout_stat) PURE; }; using ServerConnectionPtr = std::unique_ptr; diff --git a/envoy/network/socket.h b/envoy/network/socket.h index 79940b26f9226..e7200f8ace026 100644 --- a/envoy/network/socket.h +++ b/envoy/network/socket.h @@ -9,6 +9,7 @@ #include "envoy/config/core/v3/base.pb.h" #include "envoy/network/address.h" #include "envoy/network/io_handle.h" +#include "envoy/ssl/connection.h" #include "absl/strings/string_view.h" #include "absl/types/optional.h" @@ -49,9 +50,9 @@ struct SocketOptionName { * TODO(soulxu): Since there are more than address information inside the provider, this will be * renamed as ConnectionInfoProvider. Ref https://github.com/envoyproxy/envoy/issues/17168 */ -class SocketAddressProvider { +class ConnectionInfoProvider { public: - virtual ~SocketAddressProvider() = default; + virtual ~ConnectionInfoProvider() = default; /** * @return the local address of the socket. @@ -86,15 +87,21 @@ class SocketAddressProvider { virtual absl::optional connectionID() const PURE; /** - * Dumps the state of the SocketAddressProvider to the given ostream. + * Dumps the state of the ConnectionInfoProvider to the given ostream. * * @param os the std::ostream to dump to. * @param indent_level the level of indentation. */ virtual void dumpState(std::ostream& os, int indent_level) const PURE; + + /** + * @return the downstream SSL connection. This will be nullptr if the downstream + * connection does not use SSL. + */ + virtual Ssl::ConnectionInfoConstSharedPtr sslConnection() const PURE; }; -class SocketAddressSetter : public SocketAddressProvider { +class ConnectionInfoSetter : public ConnectionInfoProvider { public: /** * Set the local address of the socket. 
On accepted sockets the local address defaults to the @@ -131,10 +138,15 @@ class SocketAddressSetter : public SocketAddressProvider { * @param id Connection ID of the downstream connection. **/ virtual void setConnectionID(uint64_t id) PURE; + + /** + * @param connection_info sets the downstream ssl connection. + */ + virtual void setSslConnection(const Ssl::ConnectionInfoConstSharedPtr& ssl_connection_info) PURE; }; -using SocketAddressSetterSharedPtr = std::shared_ptr; -using SocketAddressProviderSharedPtr = std::shared_ptr; +using ConnectionInfoSetterSharedPtr = std::shared_ptr; +using ConnectionInfoProviderSharedPtr = std::shared_ptr; /** * Base class for Sockets @@ -149,11 +161,11 @@ class Socket { enum class Type { Stream, Datagram }; /** - * @return the address provider backing this socket. + * @return the connection info provider backing this socket. */ - virtual SocketAddressSetter& addressProvider() PURE; - virtual const SocketAddressProvider& addressProvider() const PURE; - virtual SocketAddressProviderSharedPtr addressProviderSharedPtr() const PURE; + virtual ConnectionInfoSetter& connectionInfoProvider() PURE; + virtual const ConnectionInfoProvider& connectionInfoProvider() const PURE; + virtual ConnectionInfoProviderSharedPtr connectionInfoProviderSharedPtr() const PURE; /** * @return IoHandle for the underlying connection diff --git a/envoy/registry/BUILD b/envoy/registry/BUILD index de34bccd492fa..b5d9e6e59cefc 100644 --- a/envoy/registry/BUILD +++ b/envoy/registry/BUILD @@ -13,7 +13,7 @@ envoy_cc_library( hdrs = ["registry.h"], deps = [ "//source/common/common:assert_lib", - "//source/common/config:api_type_oracle_lib", + "//source/common/common:minimal_logger_lib", "//source/common/protobuf:utility_lib", "//source/extensions/common:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", diff --git a/envoy/registry/registry.h b/envoy/registry/registry.h index 1bc909f23c76b..941006d7d228a 100644 --- a/envoy/registry/registry.h +++ 
b/envoy/registry/registry.h @@ -13,7 +13,6 @@ #include "source/common/common/fmt.h" #include "source/common/common/logger.h" #include "source/common/common/utility.h" -#include "source/common/config/api_type_oracle.h" #include "source/common/protobuf/utility.h" #include "source/extensions/common/utility.h" @@ -356,25 +355,16 @@ template class FactoryRegistry : public Logger::Loggablefind(config_type); - if (it != mapping->end() && it->second != factory) { - // Mark double-registered types with a nullptr. - // See issue https://github.com/envoyproxy/envoy/issues/9643. - ENVOY_LOG(warn, "Double registration for type: '{}' by '{}' and '{}'", config_type, - factory->name(), it->second ? it->second->name() : ""); - it->second = nullptr; - } else { - mapping->emplace(std::make_pair(config_type, factory)); - } - - const Protobuf::Descriptor* previous = - Config::ApiTypeOracle::getEarlierVersionDescriptor(config_type); - if (previous == nullptr) { - break; - } - config_type = previous->full_name(); + // Register config types in the mapping. + auto it = mapping->find(config_type); + if (it != mapping->end() && it->second != factory) { + // Mark double-registered types with a nullptr. + // See issue https://github.com/envoyproxy/envoy/issues/9643. + ENVOY_LOG(warn, "Double registration for type: '{}' by '{}' and '{}'", config_type, + factory->name(), it->second ? 
it->second->name() : ""); + it->second = nullptr; + } else { + mapping->emplace(std::make_pair(config_type, factory)); } } diff --git a/envoy/router/router.h b/envoy/router/router.h index e8a932262bfb7..57591b2501a8c 100644 --- a/envoy/router/router.h +++ b/envoy/router/router.h @@ -18,7 +18,6 @@ #include "envoy/http/codes.h" #include "envoy/http/conn_pool.h" #include "envoy/http/hash_policy.h" -#include "envoy/http/header_map.h" #include "envoy/router/internal_redirect.h" #include "envoy/tcp/conn_pool.h" #include "envoy/tracing/http_tracer.h" @@ -547,21 +546,6 @@ class VirtualHost { */ virtual const Config& routeConfig() const PURE; - /** - * @return const RouteSpecificFilterConfig* the per-filter config pre-processed object for - * the given filter name. If there is not per-filter config, or the filter factory returns - * nullptr, nullptr is returned. - */ - virtual const RouteSpecificFilterConfig* perFilterConfig(const std::string& name) const PURE; - - /** - * This is a helper on top of perFilterConfig() that casts the return object to the specified - * type. - */ - template const Derived* perFilterConfigTyped(const std::string& name) const { - return dynamic_cast(perFilterConfig(name)); - } - /** * @return bool whether to include the request count header in upstream requests. */ @@ -891,18 +875,6 @@ class RouteEntry : public ResponseEntry { */ virtual bool includeVirtualHostRateLimits() const PURE; - /** - * @return const Envoy::Config::TypedMetadata& return the typed metadata provided in the config - * for this route. - */ - virtual const Envoy::Config::TypedMetadata& typedMetadata() const PURE; - - /** - * @return const envoy::config::core::v3::Metadata& return the metadata provided in the config for - * this route. - */ - virtual const envoy::config::core::v3::Metadata& metadata() const PURE; - /** * @return TlsContextMatchCriteria* the tls context match criterion for this route. If there is no * tls context match criteria, nullptr is returned. 
@@ -914,31 +886,6 @@ class RouteEntry : public ResponseEntry { */ virtual const PathMatchCriterion& pathMatchCriterion() const PURE; - /** - * @return const RouteSpecificFilterConfig* the per-filter config pre-processed object for - * the given filter name. If there is not per-filter config, or the filter factory returns - * nullptr, nullptr is returned. - */ - virtual const RouteSpecificFilterConfig* perFilterConfig(const std::string& name) const PURE; - - /** - * This is a helper on top of perFilterConfig() that casts the return object to the specified - * type. - */ - template const Derived* perFilterConfigTyped(const std::string& name) const { - return dynamic_cast(perFilterConfig(name)); - }; - - /** - * This is a helper to get the route's per-filter config if it exists, otherwise the virtual - * host's. Or nullptr if none of them exist. - */ - template - const Derived* mostSpecificPerFilterConfigTyped(const std::string& name) const { - const Derived* config = perFilterConfigTyped(name); - return config ? config : virtualHost().perFilterConfigTyped(name); - } - /** * True if the virtual host this RouteEntry belongs to is configured to include the attempt * count header. @@ -1062,19 +1009,33 @@ class Route { virtual const RouteTracing* tracingConfig() const PURE; /** - * @return const RouteSpecificFilterConfig* the per-filter config pre-processed object for - * the given filter name. If there is not per-filter config, or the filter factory returns - * nullptr, nullptr is returned. + * This is a helper to get the route's per-filter config if it exists, otherwise the virtual + * host's. Or nullptr if none of them exist. */ - virtual const RouteSpecificFilterConfig* perFilterConfig(const std::string& name) const PURE; + virtual const RouteSpecificFilterConfig* + mostSpecificPerFilterConfig(const std::string& name) const PURE; /** - * This is a helper on top of perFilterConfig() that casts the return object to the specified - * type. 
+ * Fold all the available per route filter configs, invoking the callback with each config (if + * it is present). Iteration of the configs is in order of specificity. That means that the + * callback will be called first for a config on a Virtual host, then a route, and finally a route + * entry (weighted cluster). If a config is not present, the callback will not be invoked. */ - template const Derived* perFilterConfigTyped(const std::string& name) const { - return dynamic_cast(perFilterConfig(name)); - } + virtual void traversePerFilterConfig( + const std::string& filter_name, + std::function cb) const PURE; + + /** + * @return const envoy::config::core::v3::Metadata& return the metadata provided in the config for + * this route. + */ + virtual const envoy::config::core::v3::Metadata& metadata() const PURE; + + /** + * @return const Envoy::Config::TypedMetadata& return the typed metadata provided in the config + * for this route. + */ + virtual const Envoy::Config::TypedMetadata& typedMetadata() const PURE; }; using RouteConstSharedPtr = std::shared_ptr; diff --git a/envoy/secret/secret_manager.h b/envoy/secret/secret_manager.h index ce13f6eba2145..bd4f4c1850b67 100644 --- a/envoy/secret/secret_manager.h +++ b/envoy/secret/secret_manager.h @@ -156,5 +156,7 @@ class SecretManager { Server::Configuration::TransportSocketFactoryContext& secret_provider_context) PURE; }; +using SecretManagerPtr = std::unique_ptr; + } // namespace Secret } // namespace Envoy diff --git a/envoy/server/factory_context.h b/envoy/server/factory_context.h index bc93d6bd8547e..03cbfe9c23ff4 100644 --- a/envoy/server/factory_context.h +++ b/envoy/server/factory_context.h @@ -211,6 +211,11 @@ class FactoryContext : public virtual CommonFactoryContext { */ virtual Stats::Scope& listenerScope() PURE; + /** + * @return bool if these filters are created under the scope of a Quic listener. 
+ */ + virtual bool isQuicListener() const PURE; + /** * @return const envoy::config::core::v3::Metadata& the config metadata associated with this * listener. diff --git a/envoy/server/options.h b/envoy/server/options.h index 05729bbe0181d..3968b7f88ca34 100644 --- a/envoy/server/options.h +++ b/envoy/server/options.h @@ -126,11 +126,6 @@ class Options { */ virtual const envoy::config::bootstrap::v3::Bootstrap& configProto() const PURE; - /** - * @return const absl::optional& the bootstrap version to use, if specified. - */ - virtual const absl::optional& bootstrapVersion() const PURE; - /** * @return bool allow unknown fields in the static configuration? */ diff --git a/envoy/server/overload/overload_manager.h b/envoy/server/overload/overload_manager.h index 7aa694b34f489..4939897c70792 100644 --- a/envoy/server/overload/overload_manager.h +++ b/envoy/server/overload/overload_manager.h @@ -34,10 +34,24 @@ class OverloadActionNameValues { // Overload action to reduce some subset of configured timeouts. const std::string ReduceTimeouts = "envoy.overload_actions.reduce_timeouts"; + + // Overload action to reset streams using excessive memory. + const std::string ResetStreams = "envoy.overload_actions.reset_high_memory_stream"; }; using OverloadActionNames = ConstSingleton; +/** + * Well-known overload action stats. + */ +class OverloadActionStatsNameValues { +public: + // Count of ther number of streams the reset streams action has reset + const std::string ResetStreamsCount = "envoy.overload_actions.reset_high_memory_stream.count"; +}; + +using OverloadActionStatsNames = ConstSingleton; + /** * The OverloadManager protects the Envoy instance from being overwhelmed by client * requests. 
It monitors a set of resources and notifies registered listeners if diff --git a/envoy/server/transport_socket_config.h b/envoy/server/transport_socket_config.h index 2cea944a4f45c..38308a9f1a642 100644 --- a/envoy/server/transport_socket_config.h +++ b/envoy/server/transport_socket_config.h @@ -101,6 +101,8 @@ class TransportSocketFactoryContext { virtual Api::Api& api() PURE; }; +using TransportSocketFactoryContextPtr = std::unique_ptr; + class TransportSocketConfigFactory : public Config::TypedFactory { public: ~TransportSocketConfigFactory() override = default; diff --git a/envoy/stats/allocator.h b/envoy/stats/allocator.h index f6181553ad851..6f9cc9715ea43 100644 --- a/envoy/stats/allocator.h +++ b/envoy/stats/allocator.h @@ -58,6 +58,31 @@ class Allocator { virtual const SymbolTable& constSymbolTable() const PURE; virtual SymbolTable& symbolTable() PURE; + /** + * Mark rejected stats as deleted by moving them to a different vector, so they don't show up + * when iterating over stats, but prevent crashes when trying to access references to them. + * Note that allocating a stat with the same name after calling this will + * return a new stat. Hence callers should seek to avoid this situation, as is + * done in ThreadLocalStore. + */ + virtual void markCounterForDeletion(const CounterSharedPtr& counter) PURE; + virtual void markGaugeForDeletion(const GaugeSharedPtr& gauge) PURE; + virtual void markTextReadoutForDeletion(const TextReadoutSharedPtr& text_readout) PURE; + + /** + * Iterate over all stats that need to be sinked. Note, that implementations can potentially hold + * on to a mutex that will deadlock if the passed in functors try to create or delete a stat. + * @param f_size functor that is provided the number of all sinked stats. Note this is called + * only once, prior to any calls to f_stat. + * @param f_stat functor that is provided one sinked stat at a time. 
+ */ + virtual void forEachCounter(std::function f_size, + std::function f_stat) const PURE; + virtual void forEachGauge(std::function f_size, + std::function f_stat) const PURE; + virtual void forEachTextReadout(std::function f_size, + std::function f_stat) const PURE; + // TODO(jmarantz): create a parallel mechanism to instantiate histograms. At // the moment, histograms don't fit the same pattern of counters and gauges // as they are not actually created in the context of a stats allocator. diff --git a/envoy/stats/store.h b/envoy/stats/store.h index 191ed0f8589c9..a682fb0cd3d5f 100644 --- a/envoy/stats/store.h +++ b/envoy/stats/store.h @@ -7,6 +7,7 @@ #include "envoy/common/pure.h" #include "envoy/stats/histogram.h" #include "envoy/stats/scope.h" +#include "envoy/stats/stats.h" #include "envoy/stats/stats_matcher.h" #include "envoy/stats/tag_producer.h" @@ -48,6 +49,21 @@ class Store : public Scope { * @return a list of all known histograms. */ virtual std::vector histograms() const PURE; + + /** + * Iterate over all stats that need to be sinked. Note, that implementations can potentially hold + * on to a mutex that will deadlock if the passed in functors try to create or delete a stat. + * @param f_size functor that is provided the number of all sinked stats. + * @param f_stat functor that is provided one sinked stat at a time. 
+ */ + virtual void forEachCounter(std::function f_size, + std::function f_stat) const PURE; + + virtual void forEachGauge(std::function f_size, + std::function f_stat) const PURE; + + virtual void forEachTextReadout(std::function f_size, + std::function f_stat) const PURE; }; using StorePtr = std::unique_ptr; diff --git a/envoy/stream_info/stream_info.h b/envoy/stream_info/stream_info.h index 7a67ff946f180..8d01eca502e83 100644 --- a/envoy/stream_info/stream_info.h +++ b/envoy/stream_info/stream_info.h @@ -25,7 +25,8 @@ namespace Envoy { namespace Router { -class RouteEntry; +class Route; +using RouteConstSharedPtr = std::shared_ptr; } // namespace Router namespace Upstream { @@ -463,21 +464,9 @@ class StreamInfo { virtual void healthCheck(bool is_health_check) PURE; /** - * @return the downstream address provider. + * @return the downstream connection info provider. */ - virtual const Network::SocketAddressProvider& downstreamAddressProvider() const PURE; - - /** - * @param connection_info sets the downstream ssl connection. - */ - virtual void - setDownstreamSslConnection(const Ssl::ConnectionInfoConstSharedPtr& ssl_connection_info) PURE; - - /** - * @return the downstream SSL connection. This will be nullptr if the downstream - * connection does not use SSL. - */ - virtual Ssl::ConnectionInfoConstSharedPtr downstreamSslConnection() const PURE; + virtual const Network::ConnectionInfoProvider& downstreamAddressProvider() const PURE; /** * @param connection_info sets the upstream ssl connection. @@ -492,10 +481,9 @@ class StreamInfo { virtual Ssl::ConnectionInfoConstSharedPtr upstreamSslConnection() const PURE; /** - * @return const Router::RouteEntry* Get the route entry selected for this request. Note: this - * will be nullptr if no route was selected. + * @return const Router::RouteConstSharedPtr Get the route selected for this request. 
*/ - virtual const Router::RouteEntry* routeEntry() const PURE; + virtual Router::RouteConstSharedPtr route() const PURE; /** * @return const envoy::config::core::v3::Metadata& the dynamic metadata associated with this @@ -593,6 +581,27 @@ class StreamInfo { * @return Network filter chain name of the downstream connection. */ virtual const std::string& filterChainName() const PURE; + + /** + * @param connection ID of the upstream connection. + */ + virtual void setUpstreamConnectionId(uint64_t id) PURE; + + /** + * @return the ID of the upstream connection, or absl::nullopt if not available. + */ + virtual absl::optional upstreamConnectionId() const PURE; + + /** + * @param attempt_count, the number of times the request was attempted upstream. + */ + virtual void setAttemptCount(uint32_t attempt_count) PURE; + + /** + * @return the number of times the request was attempted upstream, absl::nullopt if the request + * was never attempted upstream. + */ + virtual absl::optional attemptCount() const PURE; }; } // namespace StreamInfo diff --git a/envoy/tracing/trace_context.h b/envoy/tracing/trace_context.h index 12bd04fb8303b..e5ab6466328a5 100644 --- a/envoy/tracing/trace_context.h +++ b/envoy/tracing/trace_context.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include "envoy/common/pure.h" @@ -14,22 +15,58 @@ namespace Tracing { * Protocol-independent abstraction for traceable stream. It hides the differences between different * protocol and provides tracer driver with common methods for obtaining and setting the tracing * context. - * - * TODO(wbpcode): A new interface should be added to obtain general traceable stream information, - * such as host, RPC method, protocol identification, etc. At the same time, a new interface needs - * to be added to support traversal of all trace contexts. */ class TraceContext { public: virtual ~TraceContext() = default; + using IterateCallback = std::function; + + /** + * Get context protocol. 
+ * + * @return A string view representing the protocol of the traceable stream behind the context. + */ + virtual absl::string_view protocol() const PURE; + + /** + * Get context authority. + * + * @return The authority of traceable stream. It generally consists of the host and an optional + * user information and an optional port. + */ + virtual absl::string_view authority() const PURE; + + /** + * Get context path. + * + * @return The path of traceable stream. The content and meaning of path are determined by + * specific protocol itself. + */ + virtual absl::string_view path() const PURE; + + /** + * Get context method. + * + * @return The method of traceable stream. The content and meaning of method are determined by + * specific protocol itself. + */ + virtual absl::string_view method() const PURE; + + /** + * Iterate over all context entry. + * + * @param callback supplies the iteration callback. + */ + virtual void forEach(IterateCallback callback) const PURE; + /** * Get tracing context value by key. * * @param key The context key of string view type. * @return The optional context value of string_view type. */ - virtual absl::optional getTraceContext(absl::string_view key) const PURE; + virtual absl::optional getByKey(absl::string_view key) const PURE; /** * Set new tracing context key/value pair. @@ -37,7 +74,7 @@ class TraceContext { * @param key The context key of string view type. * @param val The context value of string view type. */ - virtual void setTraceContext(absl::string_view key, absl::string_view val) PURE; + virtual void setByKey(absl::string_view key, absl::string_view val) PURE; /** * Set new tracing context key/value pair. The key MUST point to data that will live beyond @@ -46,12 +83,7 @@ class TraceContext { * @param key The context key of string view type. * @param val The context value of string view type. 
*/ - virtual void setTraceContextReferenceKey(absl::string_view key, absl::string_view val) { - // The reference semantics of key and value are ignored by default. Derived classes that wish to - // use reference semantics to improve performance or reduce memory overhead can override this - // method. - setTraceContext(key, val); - } + virtual void setByReferenceKey(absl::string_view key, absl::string_view val) PURE; /** * Set new tracing context key/value pair. Both key and val MUST point to data that will live @@ -60,12 +92,7 @@ class TraceContext { * @param key The context key of string view type. * @param val The context value of string view type. */ - virtual void setTraceContextReference(absl::string_view key, absl::string_view val) { - // The reference semantics of key and value are ignored by default. Derived classes that wish to - // use reference semantics to improve performance or reduce memory overhead can override this - // method. - setTraceContext(key, val); - } + virtual void setByReference(absl::string_view key, absl::string_view val) PURE; }; } // namespace Tracing diff --git a/envoy/upstream/BUILD b/envoy/upstream/BUILD index 7a19f61deec29..19eb5d80b1ad0 100644 --- a/envoy/upstream/BUILD +++ b/envoy/upstream/BUILD @@ -186,3 +186,10 @@ envoy_cc_library( "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", ], ) + +envoy_cc_library( + name = "scheduler_interface", + hdrs = ["scheduler.h"], + deps = [ + ], +) diff --git a/envoy/upstream/cluster_manager.h b/envoy/upstream/cluster_manager.h index 180045116ec32..20ca0acca4df0 100644 --- a/envoy/upstream/cluster_manager.h +++ b/envoy/upstream/cluster_manager.h @@ -310,6 +310,17 @@ class ClusterManager { virtual const ClusterRequestResponseSizeStatNames& clusterRequestResponseSizeStatNames() const PURE; virtual const ClusterTimeoutBudgetStatNames& clusterTimeoutBudgetStatNames() const PURE; + + /** + * Drain all connection pool connections owned by this cluster. + * @param cluster, the cluster to drain. 
+ */ + virtual void drainConnections(const std::string& cluster) PURE; + + /** + * Drain all connection pool connections owned by all clusters in the cluster manager. + */ + virtual void drainConnections() PURE; }; using ClusterManagerPtr = std::unique_ptr; diff --git a/envoy/upstream/load_balancer.h b/envoy/upstream/load_balancer.h index b1b20324b84ef..af85933b63d3a 100644 --- a/envoy/upstream/load_balancer.h +++ b/envoy/upstream/load_balancer.h @@ -173,5 +173,26 @@ class ThreadAwareLoadBalancer { using ThreadAwareLoadBalancerPtr = std::unique_ptr; +/** + * Factory for (thread-aware) load balancers. To support a load balancing policy of + * LOAD_BALANCING_POLICY_CONFIG, at least one load balancer factory corresponding to a policy in + * load_balancing_policy must be registered with Envoy. Envoy will use the first policy for which + * it has a registered factory. + */ +class TypedLoadBalancerFactory : public Config::UntypedFactory { +public: + ~TypedLoadBalancerFactory() override = default; + + /** + * @return ThreadAwareLoadBalancerPtr a new thread-aware load balancer. 
+ */ + virtual ThreadAwareLoadBalancerPtr + create(const PrioritySet& priority_set, ClusterStats& stats, Stats::Scope& stats_scope, + Runtime::Loader& runtime, Random::RandomGenerator& random, + const ::envoy::config::cluster::v3::LoadBalancingPolicy_Policy& lb_policy) PURE; + + std::string category() const override { return "envoy.load_balancers"; } +}; + } // namespace Upstream } // namespace Envoy diff --git a/envoy/upstream/load_balancer_type.h b/envoy/upstream/load_balancer_type.h index 280c317cdcce2..b8423a1d703cc 100644 --- a/envoy/upstream/load_balancer_type.h +++ b/envoy/upstream/load_balancer_type.h @@ -22,7 +22,8 @@ enum class LoadBalancerType { RingHash, OriginalDst, Maglev, - ClusterProvided + ClusterProvided, + LoadBalancingPolicyConfig }; /** diff --git a/envoy/upstream/scheduler.h b/envoy/upstream/scheduler.h new file mode 100644 index 0000000000000..ed2bce6b87301 --- /dev/null +++ b/envoy/upstream/scheduler.h @@ -0,0 +1,57 @@ +#pragma once + +#include +#include + +namespace Envoy { +namespace Upstream { + +/** + * The base class for scheduler implementations used in various load balancers. + */ +template class Scheduler { +public: + virtual ~Scheduler() = default; + + /** + * Each time peekAgain is called, it will return the best-effort subsequent + * pick, popping and reinserting the entry as if it had been picked. + * The first time peekAgain is called, it will return the + * first item which will be picked, the second time it is called it will + * return the second item which will be picked. As picks occur, that window + * will shrink. + * + * @param calculate_weight for implementations that choose to support it, this predicate specifies + * the new weight of the entry. + * @return std::shared_ptr the best effort subsequent pick. + */ + + virtual std::shared_ptr peekAgain(std::function calculate_weight) = 0; + + /** + * Pick a queue entry with closest deadline. 
+ * + * @param calculate_weight for implementations that choose to support it, this predicate specifies + * the new weight of the entry. + * @return std::shared_ptr to next valid the queue entry if or nullptr if none exists. + */ + virtual std::shared_ptr pickAndAdd(std::function calculate_weight) = 0; + + /** + * Insert entry into queue with a given weight. + * + * @param weight entry weight. + * @param entry shared pointer to entry. + */ + virtual void add(double weight, std::shared_ptr entry) = 0; + + /** + * Returns true if the scheduler is empty and nothing has been added. + * + * @return bool whether or not the internal container is empty. + */ + virtual bool empty() const = 0; +}; + +} // namespace Upstream +} // namespace Envoy diff --git a/envoy/upstream/upstream.h b/envoy/upstream/upstream.h index d8b73f39de23f..30bf5d8bb211c 100644 --- a/envoy/upstream/upstream.h +++ b/envoy/upstream/upstream.h @@ -197,6 +197,8 @@ using HealthyHostVector = Phantom; using DegradedHostVector = Phantom; using ExcludedHostVector = Phantom; using HostMap = absl::flat_hash_map; +using HostMapSharedPtr = std::shared_ptr; +using HostMapConstSharedPtr = std::shared_ptr; using HostVectorSharedPtr = std::shared_ptr; using HostVectorConstSharedPtr = std::shared_ptr; @@ -424,6 +426,12 @@ class PrioritySet { */ virtual const std::vector& hostSetsPerPriority() const PURE; + /** + * @return HostMapConstSharedPtr read only cross priority host map that indexed by host address + * string. + */ + virtual HostMapConstSharedPtr crossPriorityHostMap() const PURE; + /** * Parameter class for updateHosts. */ @@ -447,11 +455,14 @@ class PrioritySet { * @param hosts_added supplies the hosts added since the last update. * @param hosts_removed supplies the hosts removed since the last update. * @param overprovisioning_factor if presents, overwrites the current overprovisioning_factor. 
+ * @param cross_priority_host_map read only cross-priority host map which is created in the main + * thread and shared by all the worker threads. */ - virtual void updateHosts(uint32_t priority, UpdateHostsParams&& update_host_params, + virtual void updateHosts(uint32_t priority, UpdateHostsParams&& update_hosts_params, LocalityWeightsConstSharedPtr locality_weights, const HostVector& hosts_added, const HostVector& hosts_removed, - absl::optional overprovisioning_factor) PURE; + absl::optional overprovisioning_factor, + HostMapConstSharedPtr cross_priority_host_map = nullptr) PURE; /** * Callback provided during batch updates that can be used to update hosts. @@ -469,7 +480,7 @@ class PrioritySet { * @param hosts_removed supplies the hosts removed since the last update. * @param overprovisioning_factor if presents, overwrites the current overprovisioning_factor. */ - virtual void updateHosts(uint32_t priority, UpdateHostsParams&& update_host_params, + virtual void updateHosts(uint32_t priority, UpdateHostsParams&& update_hosts_params, LocalityWeightsConstSharedPtr locality_weights, const HostVector& hosts_added, const HostVector& hosts_removed, absl::optional overprovisioning_factor) PURE; @@ -692,6 +703,8 @@ using ProtocolOptionsConfigConstSharedPtr = std::shared_ptr(extensionProtocolOptions(name)); } + /** + * @return const envoy::config::cluster::v3::LoadBalancingPolicy_Policy& the load balancing policy + * to use for this cluster. + */ + virtual const envoy::config::cluster::v3::LoadBalancingPolicy_Policy& + loadBalancingPolicy() const PURE; + + /** + * @return the load balancer factory for this cluster if the load balancing type is + * LOAD_BALANCING_POLICY_CONFIG. + */ + virtual TypedLoadBalancerFactory* loadBalancerFactory() const PURE; + /** * @return const envoy::config::cluster::v3::Cluster::CommonLbConfig& the common configuration for * all load balancers for this cluster. 
diff --git a/examples/BUILD b/examples/BUILD index e8ea0aed15fd3..80becfa0e61a7 100644 --- a/examples/BUILD +++ b/examples/BUILD @@ -18,10 +18,33 @@ filegroup( "dynamic-config-fs/**/*", "jaeger-native-tracing/*", "**/*docker-compose*.yaml", + # Contrib extensions tested over in contrib. + "mysql/*", + "postgres/*", ], ), ) +genrule( + name = "contrib_configs", + srcs = glob( + [ + "mysql/*.yaml", + "postgres/*.yaml", + ], + exclude = [ + "**/*docker-compose*.yaml", + ], + ), + outs = ["example_configs.tar"], + cmd = ( + "$(location //configs:configgen.sh) NO_CONFIGGEN $(@D) $(SRCS)" + ), + tools = [ + "//configs:configgen.sh", + ], +) + filegroup( name = "certs", srcs = glob(["_extra_certs/*.pem"]), diff --git a/examples/double-proxy/envoy-backend.yaml b/examples/double-proxy/envoy-backend.yaml index 1d764fe77d64c..07cc1a7905f1d 100644 --- a/examples/double-proxy/envoy-backend.yaml +++ b/examples/double-proxy/envoy-backend.yaml @@ -9,10 +9,6 @@ static_resources: - name: "envoy.filters.listener.tls_inspector" filter_chains: - filters: - - name: envoy.filters.network.postgres_proxy - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.postgres_proxy.v3alpha.PostgresProxy - stat_prefix: egress_postgres - name: envoy.filters.network.tcp_proxy typed_config: "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy diff --git a/examples/double-proxy/envoy-frontend.yaml b/examples/double-proxy/envoy-frontend.yaml index af836ba8fc30d..37acbf334124e 100644 --- a/examples/double-proxy/envoy-frontend.yaml +++ b/examples/double-proxy/envoy-frontend.yaml @@ -7,10 +7,6 @@ static_resources: port_value: 5432 filter_chains: - filters: - - name: envoy.filters.network.postgres_proxy - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.postgres_proxy.v3alpha.PostgresProxy - stat_prefix: egress_postgres - name: envoy.filters.network.tcp_proxy typed_config: "@type": 
type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy diff --git a/examples/dynamic-config-cp/Dockerfile-control-plane b/examples/dynamic-config-cp/Dockerfile-control-plane index 39c7f2ca4223b..cc676ae2412ff 100644 --- a/examples/dynamic-config-cp/Dockerfile-control-plane +++ b/examples/dynamic-config-cp/Dockerfile-control-plane @@ -6,7 +6,7 @@ RUN apt-get update \ && apt-get clean \ && rm -rf /tmp/* /var/tmp/* /var/lib/apt/lists/* -RUN git clone https://github.com/envoyproxy/go-control-plane +RUN git clone https://github.com/envoyproxy/go-control-plane && cd go-control-plane && git checkout b4adc3bb5fe5288bff01cd452dad418ef98c676e ADD ./resource.go /go/go-control-plane/internal/example/resource.go RUN cd go-control-plane && make bin/example WORKDIR /go/go-control-plane diff --git a/examples/dynamic-config-cp/resource.go b/examples/dynamic-config-cp/resource.go index 2cc38f35bc200..cd0ce177abc2f 100644 --- a/examples/dynamic-config-cp/resource.go +++ b/examples/dynamic-config-cp/resource.go @@ -170,5 +170,6 @@ func GenerateSnapshot() cache.Snapshot { []types.Resource{makeHTTPListener(ListenerName, RouteName)}, []types.Resource{}, // runtimes []types.Resource{}, // secrets + []types.Resource{}, // extensions configs ) } diff --git a/examples/dynamic-config-cp/verify.sh b/examples/dynamic-config-cp/verify.sh index 37bd30d661822..d684caebc9b1a 100755 --- a/examples/dynamic-config-cp/verify.sh +++ b/examples/dynamic-config-cp/verify.sh @@ -24,6 +24,8 @@ docker-compose up --build -d go-control-plane wait_for 30 sh -c "docker-compose ps go-control-plane | grep healthy | grep -v unhealthy" +sleep 2 + run_log "Check for response from service1 backend" responds_with \ "Request served by service1" \ diff --git a/examples/grpc-bridge/client/envoy-proxy.yaml b/examples/grpc-bridge/client/envoy-proxy.yaml index 31668610eb039..65795feae685e 100644 --- a/examples/grpc-bridge/client/envoy-proxy.yaml +++ b/examples/grpc-bridge/client/envoy-proxy.yaml @@ -38,7 
+38,11 @@ static_resources: type: LOGICAL_DNS dns_lookup_family: V4_ONLY lb_policy: ROUND_ROBIN - http_protocol_options: {} + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http_protocol_options: {} load_assignment: cluster_name: backend-proxy endpoints: diff --git a/examples/load-reporting-service/docker-compose.yaml b/examples/load-reporting-service/docker-compose.yaml index 3298a8287a017..3595e88dfe0cb 100644 --- a/examples/load-reporting-service/docker-compose.yaml +++ b/examples/load-reporting-service/docker-compose.yaml @@ -7,6 +7,7 @@ services: dockerfile: Dockerfile-envoy ports: - "80-81:80" + - "8081:8081" http_service: build: diff --git a/examples/load-reporting-service/go.mod b/examples/load-reporting-service/go.mod index 7d83f2780929f..4d38ae41a6b0b 100644 --- a/examples/load-reporting-service/go.mod +++ b/examples/load-reporting-service/go.mod @@ -3,9 +3,7 @@ module github.com/envoyproxy/envoy/examples/load-reporting-service go 1.13 require ( - github.com/envoyproxy/go-control-plane v0.9.0 - github.com/golang/protobuf v1.3.2 - golang.org/x/net v0.0.0-20200226121028-0de0cce0169b // indirect - golang.org/x/sys v0.0.0-20190412213103-97732733099d // indirect - google.golang.org/grpc v1.25.1 + github.com/envoyproxy/go-control-plane v0.9.9 + github.com/golang/protobuf v1.4.3 + google.golang.org/grpc v1.36.0 ) diff --git a/examples/load-reporting-service/go.sum b/examples/load-reporting-service/go.sum index 5667aadb396b3..c1abe44d79536 100644 --- a/examples/load-reporting-service/go.sum +++ b/examples/load-reporting-service/go.sum @@ -1,20 +1,56 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed h1:OZmjad4L3H8ncOIR8rnb5MREYqG8ixi5+WbeUsquF0c= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0 h1:67WMNTvGrl7V1dWdKCeTwxDr7nio9clKoTlLhwIPnT4= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9 h1:vQLjymTobffN2R0F8eTqw6q7iozfRO5Z0m+/4Vw+/uA= +github.com/envoyproxy/go-control-plane v0.9.9/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= 
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/uuid v1.1.2/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -22,20 +58,26 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISg golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a 
h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= +golang.org/x/sys 
v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -43,14 +85,37 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc 
v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0 h1:o1bcQ6imQMIOpdrO3SWf2z5RV72WbDwdXuK0MDlc8As= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools 
v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/examples/load-reporting-service/main.go b/examples/load-reporting-service/main.go index ed47ca58cc390..d9e6800066e51 100644 --- a/examples/load-reporting-service/main.go +++ b/examples/load-reporting-service/main.go @@ -5,7 +5,7 @@ import ( "net" "github.com/envoyproxy/envoy/examples/load-reporting-service/server" - gcpLoadStats "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v2" + gcpLoadStats "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3" "google.golang.org/grpc" ) diff --git a/examples/load-reporting-service/server/lrs_server.go b/examples/load-reporting-service/server/lrs_server.go index 9fc0857fc7f80..9c662a94a5c3b 100644 --- a/examples/load-reporting-service/server/lrs_server.go +++ b/examples/load-reporting-service/server/lrs_server.go @@ -4,7 +4,7 @@ import ( "log" "sync" - gcpLoadStats "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v2" + gcpLoadStats "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3" "github.com/golang/protobuf/ptypes/duration" ) diff --git a/examples/load-reporting-service/service-envoy-w-lrs.yaml b/examples/load-reporting-service/service-envoy-w-lrs.yaml index ac0ebca7774e4..c8f3a31da413b 100644 --- a/examples/load-reporting-service/service-envoy-w-lrs.yaml +++ b/examples/load-reporting-service/service-envoy-w-lrs.yaml @@ -40,6 +40,11 @@ static_resources: - name: load_reporting_cluster type: STRICT_DNS lb_policy: ROUND_ROBIN + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} load_assignment: cluster_name: load_reporting_cluster endpoints: diff --git 
a/examples/load-reporting-service/verify.sh b/examples/load-reporting-service/verify.sh index 9d9f1b332748a..c97b394aba4de 100755 --- a/examples/load-reporting-service/verify.sh +++ b/examples/load-reporting-service/verify.sh @@ -2,6 +2,7 @@ export NAME=load-reporting export UPARGS="--scale http_service=2" +export DELAY=10 # shellcheck source=examples/verify-common.sh . "$(dirname "${BASH_SOURCE[0]}")/../verify-common.sh" @@ -17,4 +18,12 @@ docker-compose logs http_service | grep http_service_2 | grep HTTP | grep 200 run_log "Check logs: lrs_server" docker-compose logs lrs_server | grep "up and running" +run_log "Check logs: envoy is connect to lrs_server" +responds_with \ + upstream_rq_200 \ + "http://localhost:8081/stats?filter=load_reporting_cluster" + +run_log "Check logs: lrs_server works normally" +docker-compose logs lrs_server | grep "Got stats from cluster" + # TODO(phlax): add some test/docs for interacting with load reporting server diff --git a/examples/mysql/Dockerfile-proxy b/examples/mysql/Dockerfile-proxy index f70f443114613..274973e070e65 100644 --- a/examples/mysql/Dockerfile-proxy +++ b/examples/mysql/Dockerfile-proxy @@ -1,4 +1,4 @@ -FROM envoyproxy/envoy-dev:latest +FROM envoyproxy/envoy-contrib-dev:latest COPY ./envoy.yaml /etc/envoy.yaml RUN chmod go+r /etc/envoy.yaml diff --git a/examples/postgres/Dockerfile-proxy b/examples/postgres/Dockerfile-proxy index f70f443114613..274973e070e65 100644 --- a/examples/postgres/Dockerfile-proxy +++ b/examples/postgres/Dockerfile-proxy @@ -1,4 +1,4 @@ -FROM envoyproxy/envoy-dev:latest +FROM envoyproxy/envoy-contrib-dev:latest COPY ./envoy.yaml /etc/envoy.yaml RUN chmod go+r /etc/envoy.yaml diff --git a/examples/tls-inspector/envoy.yaml b/examples/tls-inspector/envoy.yaml index b8e444dae1f27..75c5c52572ab4 100644 --- a/examples/tls-inspector/envoy.yaml +++ b/examples/tls-inspector/envoy.yaml @@ -1,5 +1,4 @@ admin: - access_log_path: "/dev/null" address: socket_address: address: 0.0.0.0 diff --git 
a/examples/udp/envoy.yaml b/examples/udp/envoy.yaml index e516eff5fd94b..309d7e0560462 100644 --- a/examples/udp/envoy.yaml +++ b/examples/udp/envoy.yaml @@ -28,7 +28,6 @@ static_resources: port_value: 5005 admin: - access_log_path: "/dev/null" address: socket_address: address: 0.0.0.0 diff --git a/examples/wasm-cc/docker-compose-wasm.yaml b/examples/wasm-cc/docker-compose-wasm.yaml index 0f3c45f09ce15..072928843f428 100644 --- a/examples/wasm-cc/docker-compose-wasm.yaml +++ b/examples/wasm-cc/docker-compose-wasm.yaml @@ -4,8 +4,7 @@ services: wasm_compile_update: image: envoyproxy/envoy-build-ubuntu:55d9e4719d2bd0accce8f829b44dab70cd42112a command: | - bash -c "bazel build //examples/wasm-cc:envoy_filter_http_wasm_updated_example.wasm \ - && cp -a bazel-bin/examples/wasm-cc/* /build" + bash -c "bazel build //examples/wasm-cc:envoy_filter_http_wasm_updated_example.wasm && cp -a bazel-bin/examples/wasm-cc/* /build" working_dir: /source volumes: - ../..:/source @@ -14,8 +13,7 @@ services: wasm_compile: image: envoyproxy/envoy-build-ubuntu:55d9e4719d2bd0accce8f829b44dab70cd42112a command: | - bash -c "bazel build //examples/wasm-cc:envoy_filter_http_wasm_example.wasm \ - && cp -a bazel-bin/examples/wasm-cc/* /build" + bash -c "bazel build //examples/wasm-cc:envoy_filter_http_wasm_example.wasm && cp -a bazel-bin/examples/wasm-cc/* /build" working_dir: /source volumes: - ../..:/source diff --git a/generated_api_shadow/BUILD b/generated_api_shadow/BUILD index a70eae799d797..5bbde32946b63 100644 --- a/generated_api_shadow/BUILD +++ b/generated_api_shadow/BUILD @@ -18,8 +18,6 @@ proto_library( "//envoy/api/v2/ratelimit:pkg", "//envoy/api/v2/route:pkg", "//envoy/config/bootstrap/v2:pkg", - "//envoy/config/cluster/dynamic_forward_proxy/v2alpha:pkg", - "//envoy/config/common/dynamic_forward_proxy/v2alpha:pkg", "//envoy/config/filter/accesslog/v2:pkg", "//envoy/config/filter/fault/v2:pkg", "//envoy/config/filter/network/http_connection_manager/v2:pkg", @@ -59,6 +57,12 @@ 
proto_library( name = "v3_protos", visibility = ["//visibility:public"], deps = [ + "//contrib/envoy/extensions/filters/http/squash/v3:pkg", + "//contrib/envoy/extensions/filters/http/sxg/v3alpha:pkg", + "//contrib/envoy/extensions/filters/network/kafka_broker/v3:pkg", + "//contrib/envoy/extensions/filters/network/mysql_proxy/v3:pkg", + "//contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha:pkg", + "//contrib/envoy/extensions/filters/network/rocketmq_proxy/v3:pkg", "//envoy/admin/v3:pkg", "//envoy/config/accesslog/v3:pkg", "//envoy/config/bootstrap/v3:pkg", @@ -96,6 +100,7 @@ proto_library( "//envoy/extensions/clusters/dynamic_forward_proxy/v3:pkg", "//envoy/extensions/clusters/redis/v3:pkg", "//envoy/extensions/common/dynamic_forward_proxy/v3:pkg", + "//envoy/extensions/common/key_value/v3:pkg", "//envoy/extensions/common/matching/v3:pkg", "//envoy/extensions/common/ratelimit/v3:pkg", "//envoy/extensions/common/tap/v3:pkg", @@ -145,7 +150,6 @@ proto_library( "//envoy/extensions/filters/http/rbac/v3:pkg", "//envoy/extensions/filters/http/router/v3:pkg", "//envoy/extensions/filters/http/set_metadata/v3:pkg", - "//envoy/extensions/filters/http/squash/v3:pkg", "//envoy/extensions/filters/http/tap/v3:pkg", "//envoy/extensions/filters/http/wasm/v3:pkg", "//envoy/extensions/filters/listener/http_inspector/v3:pkg", @@ -161,15 +165,11 @@ proto_library( "//envoy/extensions/filters/network/echo/v3:pkg", "//envoy/extensions/filters/network/ext_authz/v3:pkg", "//envoy/extensions/filters/network/http_connection_manager/v3:pkg", - "//envoy/extensions/filters/network/kafka_broker/v3:pkg", "//envoy/extensions/filters/network/local_ratelimit/v3:pkg", "//envoy/extensions/filters/network/mongo_proxy/v3:pkg", - "//envoy/extensions/filters/network/mysql_proxy/v3:pkg", - "//envoy/extensions/filters/network/postgres_proxy/v3alpha:pkg", "//envoy/extensions/filters/network/ratelimit/v3:pkg", "//envoy/extensions/filters/network/rbac/v3:pkg", 
"//envoy/extensions/filters/network/redis_proxy/v3:pkg", - "//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg", "//envoy/extensions/filters/network/sni_cluster/v3:pkg", "//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha:pkg", "//envoy/extensions/filters/network/tcp_proxy/v3:pkg", @@ -180,6 +180,7 @@ proto_library( "//envoy/extensions/filters/network/zookeeper_proxy/v3:pkg", "//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg", "//envoy/extensions/filters/udp/udp_proxy/v3:pkg", + "//envoy/extensions/formatter/metadata/v3:pkg", "//envoy/extensions/formatter/req_without_query/v3:pkg", "//envoy/extensions/health_checkers/redis/v3:pkg", "//envoy/extensions/http/header_formatters/preserve_case/v3:pkg", @@ -188,6 +189,7 @@ proto_library( "//envoy/extensions/internal_redirect/allow_listed_routes/v3:pkg", "//envoy/extensions/internal_redirect/previous_routes/v3:pkg", "//envoy/extensions/internal_redirect/safe_cross_scheme/v3:pkg", + "//envoy/extensions/key_value/file_based/v3:pkg", "//envoy/extensions/matching/common_inputs/environment_variable/v3:pkg", "//envoy/extensions/matching/input_matchers/consistent_hashing/v3:pkg", "//envoy/extensions/matching/input_matchers/ip/v3:pkg", @@ -255,3 +257,11 @@ proto_library( ":v3_protos", ], ) + +filegroup( + name = "proto_breaking_change_detector_buf_config", + srcs = [ + "buf.yaml", + ], + visibility = ["//visibility:public"], +) diff --git a/generated_api_shadow/bazel/repositories.bzl b/generated_api_shadow/bazel/repositories.bzl index 74e19f831179f..ef92aa45f0064 100644 --- a/generated_api_shadow/bazel/repositories.bzl +++ b/generated_api_shadow/bazel/repositories.bzl @@ -47,6 +47,11 @@ def api_dependencies(): name = "opentelemetry_proto", build_file_content = OPENTELEMETRY_LOGS_BUILD_CONTENT, ) + external_http_archive( + name = "com_github_bufbuild_buf", + build_file_content = BUF_BUILD_CONTENT, + tags = ["manual"], + ) PROMETHEUSMETRICS_BUILD_CONTENT = """ 
load("@envoy_api//bazel:api_build_system.bzl", "api_cc_py_proto_library") @@ -150,3 +155,17 @@ go_proto_library( visibility = ["//visibility:public"], ) """ + +BUF_BUILD_CONTENT = """ +package( + default_visibility = ["//visibility:public"], +) + +filegroup( + name = "buf", + srcs = [ + "@com_github_bufbuild_buf//:bin/buf", + ], + tags = ["manual"], # buf is downloaded as a linux binary; tagged manual to prevent build for non-linux users +) +""" diff --git a/generated_api_shadow/bazel/repository_locations.bzl b/generated_api_shadow/bazel/repository_locations.bzl index 968c6a9ffa286..be1e9c9789e4b 100644 --- a/generated_api_shadow/bazel/repository_locations.bzl +++ b/generated_api_shadow/bazel/repository_locations.bzl @@ -44,9 +44,9 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_desc = "xDS API Working Group (xDS-WG)", project_url = "https://github.com/cncf/xds", # During the UDPA -> xDS migration, we aren't working with releases. - version = "b88cc788a63e5b38ee334a2e702c67901355ae2c", - sha256 = "3220df8564f217665b6e17776569c5f748178c2b9cbf83bb55a13ddc0a3738f0", - release_date = "2021-03-23", + version = "dd25fe81a44506ab21ea666fb70b3b1c4bb183ee", + sha256 = "9184235cd31272679e4c7f9232c341d4ea75351ded74d3fbba28b05c290bfa71", + release_date = "2021-07-22", strip_prefix = "xds-{version}", urls = ["https://github.com/cncf/xds/archive/{version}.tar.gz"], use_category = ["api"], @@ -118,4 +118,16 @@ REPOSITORY_LOCATIONS_SPEC = dict( urls = ["https://github.com/open-telemetry/opentelemetry-proto/archive/v{version}.tar.gz"], use_category = ["api"], ), + com_github_bufbuild_buf = dict( + project_name = "buf", + project_desc = "A new way of working with Protocol Buffers.", # Used for breaking change detection in API protobufs + project_url = "https://buf.build", + version = "0.53.0", + sha256 = "888bb52d358e34a8d6a57ecff426bed896bdf478ad13c78a70a9e1a9a2d75715", + strip_prefix = "buf", + urls = 
["https://github.com/bufbuild/buf/releases/download/v{version}/buf-Linux-x86_64.tar.gz"], + release_date = "2021-08-25", + use_category = ["api"], + tags = ["manual"], + ), ) diff --git a/generated_api_shadow/envoy/extensions/filters/network/mysql_proxy/v3/BUILD b/generated_api_shadow/contrib/envoy/extensions/filters/http/squash/v3/BUILD similarity index 100% rename from generated_api_shadow/envoy/extensions/filters/network/mysql_proxy/v3/BUILD rename to generated_api_shadow/contrib/envoy/extensions/filters/http/squash/v3/BUILD diff --git a/generated_api_shadow/envoy/extensions/filters/http/squash/v3/squash.proto b/generated_api_shadow/contrib/envoy/extensions/filters/http/squash/v3/squash.proto similarity index 100% rename from generated_api_shadow/envoy/extensions/filters/http/squash/v3/squash.proto rename to generated_api_shadow/contrib/envoy/extensions/filters/http/squash/v3/squash.proto diff --git a/api/envoy/extensions/tracers/datadog/v4alpha/BUILD b/generated_api_shadow/contrib/envoy/extensions/filters/http/sxg/v3alpha/BUILD similarity index 82% rename from api/envoy/extensions/tracers/datadog/v4alpha/BUILD rename to generated_api_shadow/contrib/envoy/extensions/filters/http/sxg/v3alpha/BUILD index d500cc41da1fe..3ca8242f77801 100644 --- a/api/envoy/extensions/tracers/datadog/v4alpha/BUILD +++ b/generated_api_shadow/contrib/envoy/extensions/filters/http/sxg/v3alpha/BUILD @@ -6,7 +6,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ - "//envoy/config/trace/v3:pkg", + "//envoy/extensions/transport_sockets/tls/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/http/sxg/v3alpha/sxg.proto b/generated_api_shadow/contrib/envoy/extensions/filters/http/sxg/v3alpha/sxg.proto new file mode 100644 index 0000000000000..b9efc278e6de8 --- /dev/null +++ b/generated_api_shadow/contrib/envoy/extensions/filters/http/sxg/v3alpha/sxg.proto @@ -0,0 +1,67 @@ +syntax = "proto3"; + +package 
envoy.extensions.filters.http.sxg.v3alpha; + +import "envoy/extensions/transport_sockets/tls/v3/secret.proto"; + +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.sxg.v3alpha"; +option java_outer_classname = "SxgProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Signed HTTP Exchange Filter] +// SXG :ref:`configuration overview `. +// [#extension: envoy.filters.http.sxg] + +// [#next-free-field: 10] +message SXG { + // The SDS configuration for the public key data for the SSL certificate that will be used to sign the + // SXG response. + transport_sockets.tls.v3.SdsSecretConfig certificate = 1; + + // The SDS configuration for the private key data for the SSL certificate that will be used to sign the + // SXG response. + transport_sockets.tls.v3.SdsSecretConfig private_key = 2; + + // The duration for which the generated SXG package will be valid. Default is 604800s (7 days in seconds). + // Note that in order to account for clock skew, the timestamp will be backdated by a day. So, if duration + // is set to 7 days, that will be 7 days from 24 hours ago (6 days from now). Also note that while 6/7 days + // is appropriate for most content, if the downstream service is serving Javascript, or HTML with inline + // Javascript, 1 day (so, with backdated expiry, 2 days, or 172800 seconds) is more appropriate. + google.protobuf.Duration duration = 3; + + // The SXG response payload is Merkle Integrity Content Encoding (MICE) encoded (specification is [here](https://datatracker.ietf.org/doc/html/draft-thomson-http-mice-03)) + // This value indicates the record size in the encoded payload. The default value is 4096. 
+ uint64 mi_record_size = 4; + + // The URI of certificate CBOR file published. Since it is required that the certificate CBOR file + // be served from the same domain as the SXG document, this should be a relative URI. + string cbor_url = 5 [(validate.rules).string = {min_len: 1 prefix: "/"}]; + + // URL to retrieve validity data for signature, a CBOR map. See specification [here](https://tools.ietf.org/html/draft-yasskin-httpbis-origin-signed-exchanges-impl-00#section-3.6) + string validity_url = 6 [(validate.rules).string = {min_len: 1 prefix: "/"}]; + + // Header that will be set if it is determined that the client can accept SXG (typically `accept: application/signed-exchange;v=b3) + // If not set, filter will default to: `x-client-can-accept-sxg` + string client_can_accept_sxg_header = 7 [ + (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false ignore_empty: true} + ]; + + // Header set by downstream service to signal that the response should be transformed to SXG If not set, + // filter will default to: `x-should-encode-sxg` + string should_encode_sxg_header = 8 [ + (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false ignore_empty: true} + ]; + + // Headers that will be stripped from the SXG document, by listing a prefix (i.e. 
`x-custom-` will cause + // all headers prefixed by `x-custom-` to be omitted from the SXG document) + repeated string header_prefix_filters = 9 [ + (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} + ]; +} diff --git a/generated_api_shadow/envoy/extensions/filters/network/postgres_proxy/v3alpha/BUILD b/generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_broker/v3/BUILD similarity index 100% rename from generated_api_shadow/envoy/extensions/filters/network/postgres_proxy/v3alpha/BUILD rename to generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_broker/v3/BUILD diff --git a/generated_api_shadow/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto b/generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto similarity index 100% rename from generated_api_shadow/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto rename to generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/network/mysql_proxy/v3/BUILD b/generated_api_shadow/contrib/envoy/extensions/filters/network/mysql_proxy/v3/BUILD new file mode 100644 index 0000000000000..ee92fb652582e --- /dev/null +++ b/generated_api_shadow/contrib/envoy/extensions/filters/network/mysql_proxy/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto b/generated_api_shadow/contrib/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto similarity index 100% rename from generated_api_shadow/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto rename to generated_api_shadow/contrib/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha/BUILD b/generated_api_shadow/contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha/BUILD new file mode 100644 index 0000000000000..ee92fb652582e --- /dev/null +++ b/generated_api_shadow/contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.proto b/generated_api_shadow/contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.proto similarity index 100% rename from generated_api_shadow/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.proto rename to generated_api_shadow/contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.proto diff --git a/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/BUILD b/generated_api_shadow/contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/BUILD similarity index 100% rename from generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/BUILD rename to generated_api_shadow/contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/BUILD diff --git a/api/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto b/generated_api_shadow/contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto similarity index 94% rename from api/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto rename to generated_api_shadow/contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto index c354b629bea28..12438751fada6 100644 --- a/api/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto +++ b/generated_api_shadow/contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package envoy.extensions.filters.network.rocketmq_proxy.v3; -import "envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto"; +import "contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto"; import "google/protobuf/duration.proto"; diff --git 
a/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto b/generated_api_shadow/contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto similarity index 100% rename from generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto rename to generated_api_shadow/contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto diff --git a/generated_api_shadow/envoy/admin/v3/server_info.proto b/generated_api_shadow/envoy/admin/v3/server_info.proto index 917afc06eb0ed..7593ade49a62e 100644 --- a/generated_api_shadow/envoy/admin/v3/server_info.proto +++ b/generated_api_shadow/envoy/admin/v3/server_info.proto @@ -88,7 +88,9 @@ message CommandLineOptions { Immediate = 1; } - reserved 12; + reserved 12, 29; + + reserved "bootstrap_version"; // See :option:`--base-id` for details. uint64 base_id = 1; @@ -177,9 +179,6 @@ message CommandLineOptions { // See :option:`--disable-extensions` for details. repeated string disabled_extensions = 28; - // See :option:`--bootstrap-version` for details. - uint32 bootstrap_version = 29; - // See :option:`--enable-fine-grain-logging` for details. bool enable_fine_grain_logging = 34; diff --git a/generated_api_shadow/envoy/admin/v4alpha/BUILD b/generated_api_shadow/envoy/admin/v4alpha/BUILD deleted file mode 100644 index 74de2ca2a3d53..0000000000000 --- a/generated_api_shadow/envoy/admin/v4alpha/BUILD +++ /dev/null @@ -1,17 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/admin/v3:pkg", - "//envoy/config/bootstrap/v4alpha:pkg", - "//envoy/config/cluster/v4alpha:pkg", - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/tap/v4alpha:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/admin/v4alpha/certs.proto b/generated_api_shadow/envoy/admin/v4alpha/certs.proto deleted file mode 100644 index 0dd868f71fa6a..0000000000000 --- a/generated_api_shadow/envoy/admin/v4alpha/certs.proto +++ /dev/null @@ -1,86 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v4alpha; - -import "google/protobuf/timestamp.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v4alpha"; -option java_outer_classname = "CertsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Certificates] - -// Proto representation of certificate details. Admin endpoint uses this wrapper for `/certs` to -// display certificate information. See :ref:`/certs ` for more -// information. -message Certificates { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.Certificates"; - - // List of certificates known to an Envoy. - repeated Certificate certificates = 1; -} - -message Certificate { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.Certificate"; - - // Details of CA certificate. 
- repeated CertificateDetails ca_cert = 1; - - // Details of Certificate Chain - repeated CertificateDetails cert_chain = 2; -} - -// [#next-free-field: 8] -message CertificateDetails { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.CertificateDetails"; - - message OcspDetails { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v3.CertificateDetails.OcspDetails"; - - // Indicates the time from which the OCSP response is valid. - google.protobuf.Timestamp valid_from = 1; - - // Indicates the time at which the OCSP response expires. - google.protobuf.Timestamp expiration = 2; - } - - // Path of the certificate. - string path = 1; - - // Certificate Serial Number. - string serial_number = 2; - - // List of Subject Alternate names. - repeated SubjectAlternateName subject_alt_names = 3; - - // Minimum of days until expiration of certificate and it's chain. - uint64 days_until_expiration = 4; - - // Indicates the time from which the certificate is valid. - google.protobuf.Timestamp valid_from = 5; - - // Indicates the time at which the certificate expires. - google.protobuf.Timestamp expiration_time = 6; - - // Details related to the OCSP response associated with this certificate, if any. - OcspDetails ocsp_details = 7; -} - -message SubjectAlternateName { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v3.SubjectAlternateName"; - - // Subject Alternate Name. 
- oneof name { - string dns = 1; - - string uri = 2; - - string ip_address = 3; - } -} diff --git a/generated_api_shadow/envoy/admin/v4alpha/clusters.proto b/generated_api_shadow/envoy/admin/v4alpha/clusters.proto deleted file mode 100644 index 12969a28d0082..0000000000000 --- a/generated_api_shadow/envoy/admin/v4alpha/clusters.proto +++ /dev/null @@ -1,176 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v4alpha; - -import "envoy/admin/v4alpha/metrics.proto"; -import "envoy/config/cluster/v4alpha/circuit_breaker.proto"; -import "envoy/config/core/v4alpha/address.proto"; -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/health_check.proto"; -import "envoy/type/v3/percent.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v4alpha"; -option java_outer_classname = "ClustersProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Clusters] - -// Admin endpoint uses this wrapper for `/clusters` to display cluster status information. -// See :ref:`/clusters ` for more information. -message Clusters { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.Clusters"; - - // Mapping from cluster name to each cluster's status. - repeated ClusterStatus cluster_statuses = 1; -} - -// Details an individual cluster's current status. -// [#next-free-field: 8] -message ClusterStatus { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ClusterStatus"; - - // Name of the cluster. - string name = 1; - - // Denotes whether this cluster was added via API or configured statically. - bool added_via_api = 2; - - // The success rate threshold used in the last interval. 
- // If - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *false*, all errors: externally and locally generated were used to calculate the threshold. - // If - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *true*, only externally generated errors were used to calculate the threshold. - // The threshold is used to eject hosts based on their success rate. See - // :ref:`Cluster outlier detection ` documentation for details. - // - // Note: this field may be omitted in any of the three following cases: - // - // 1. There were not enough hosts with enough request volume to proceed with success rate based - // outlier ejection. - // 2. The threshold is computed to be < 0 because a negative value implies that there was no - // threshold for that interval. - // 3. Outlier detection is not enabled for this cluster. - type.v3.Percent success_rate_ejection_threshold = 3; - - // Mapping from host address to the host's current status. - repeated HostStatus host_statuses = 4; - - // The success rate threshold used in the last interval when only locally originated failures were - // taken into account and externally originated errors were treated as success. - // This field should be interpreted only when - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *true*. The threshold is used to eject hosts based on their success rate. - // See :ref:`Cluster outlier detection ` documentation for - // details. - // - // Note: this field may be omitted in any of the three following cases: - // - // 1. There were not enough hosts with enough request volume to proceed with success rate based - // outlier ejection. - // 2. The threshold is computed to be < 0 because a negative value implies that there was no - // threshold for that interval. - // 3. Outlier detection is not enabled for this cluster. - type.v3.Percent local_origin_success_rate_ejection_threshold = 5; - - // :ref:`Circuit breaking ` settings of the cluster. 
- config.cluster.v4alpha.CircuitBreakers circuit_breakers = 6; - - // Observability name of the cluster. - string observability_name = 7; -} - -// Current state of a particular host. -// [#next-free-field: 10] -message HostStatus { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.HostStatus"; - - // Address of this host. - config.core.v4alpha.Address address = 1; - - // List of stats specific to this host. - repeated SimpleMetric stats = 2; - - // The host's current health status. - HostHealthStatus health_status = 3; - - // Request success rate for this host over the last calculated interval. - // If - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *false*, all errors: externally and locally generated were used in success rate - // calculation. If - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *true*, only externally generated errors were used in success rate calculation. - // See :ref:`Cluster outlier detection ` documentation for - // details. - // - // Note: the message will not be present if host did not have enough request volume to calculate - // success rate or the cluster did not have enough hosts to run through success rate outlier - // ejection. - type.v3.Percent success_rate = 4; - - // The host's weight. If not configured, the value defaults to 1. - uint32 weight = 5; - - // The hostname of the host, if applicable. - string hostname = 6; - - // The host's priority. If not configured, the value defaults to 0 (highest priority). - uint32 priority = 7; - - // Request success rate for this host over the last calculated - // interval when only locally originated errors are taken into account and externally originated - // errors were treated as success. - // This field should be interpreted only when - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *true*. - // See :ref:`Cluster outlier detection ` documentation for - // details. 
- // - // Note: the message will not be present if host did not have enough request volume to calculate - // success rate or the cluster did not have enough hosts to run through success rate outlier - // ejection. - type.v3.Percent local_origin_success_rate = 8; - - // locality of the host. - config.core.v4alpha.Locality locality = 9; -} - -// Health status for a host. -// [#next-free-field: 9] -message HostHealthStatus { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.HostHealthStatus"; - - // The host is currently failing active health checks. - bool failed_active_health_check = 1; - - // The host is currently considered an outlier and has been ejected. - bool failed_outlier_check = 2; - - // The host is currently being marked as degraded through active health checking. - bool failed_active_degraded_check = 4; - - // The host has been removed from service discovery, but is being stabilized due to active - // health checking. - bool pending_dynamic_removal = 5; - - // The host has not yet been health checked. - bool pending_active_hc = 6; - - // The host should be excluded from panic, spillover, etc. calculations because it was explicitly - // taken out of rotation via protocol signal and is not meant to be routed to. - bool excluded_via_immediate_hc_fail = 7; - - // The host failed active HC due to timeout. - bool active_hc_timeout = 8; - - // Health status as reported by EDS. Note: only HEALTHY and UNHEALTHY are currently supported - // here. - // [#comment:TODO(mrice32): pipe through remaining EDS health status possibilities.] 
- config.core.v4alpha.HealthStatus eds_health_status = 3; -} diff --git a/generated_api_shadow/envoy/admin/v4alpha/config_dump.proto b/generated_api_shadow/envoy/admin/v4alpha/config_dump.proto deleted file mode 100644 index 2e36bc16f9b60..0000000000000 --- a/generated_api_shadow/envoy/admin/v4alpha/config_dump.proto +++ /dev/null @@ -1,484 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v4alpha; - -import "envoy/config/bootstrap/v4alpha/bootstrap.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/timestamp.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v4alpha"; -option java_outer_classname = "ConfigDumpProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: ConfigDump] - -// Resource status from the view of a xDS client, which tells the synchronization -// status between the xDS client and the xDS server. -enum ClientResourceStatus { - // Resource status is not available/unknown. - UNKNOWN = 0; - - // Client requested this resource but hasn't received any update from management - // server. The client will not fail requests, but will queue them until update - // arrives or the client times out waiting for the resource. - REQUESTED = 1; - - // This resource has been requested by the client but has either not been - // delivered by the server or was previously delivered by the server and then - // subsequently removed from resources provided by the server. For more - // information, please refer to the :ref:`"Knowing When a Requested Resource - // Does Not Exist" ` section. - DOES_NOT_EXIST = 2; - - // Client received this resource and replied with ACK. - ACKED = 3; - - // Client received this resource and replied with NACK. 
- NACKED = 4; -} - -// The :ref:`/config_dump ` admin endpoint uses this wrapper -// message to maintain and serve arbitrary configuration information from any component in Envoy. -message ConfigDump { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ConfigDump"; - - // This list is serialized and dumped in its entirety at the - // :ref:`/config_dump ` endpoint. - // - // The following configurations are currently supported and will be dumped in the order given - // below: - // - // * *bootstrap*: :ref:`BootstrapConfigDump ` - // * *clusters*: :ref:`ClustersConfigDump ` - // * *endpoints*: :ref:`EndpointsConfigDump ` - // * *listeners*: :ref:`ListenersConfigDump ` - // * *scoped_routes*: :ref:`ScopedRoutesConfigDump ` - // * *routes*: :ref:`RoutesConfigDump ` - // * *secrets*: :ref:`SecretsConfigDump ` - // - // EDS Configuration will only be dumped by using parameter `?include_eds` - // - // You can filter output with the resource and mask query parameters. - // See :ref:`/config_dump?resource={} `, - // :ref:`/config_dump?mask={} `, - // or :ref:`/config_dump?resource={},mask={} - // ` for more information. - repeated google.protobuf.Any configs = 1; -} - -message UpdateFailureState { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.UpdateFailureState"; - - // What the component configuration would have been if the update had succeeded. - // This field may not be populated by xDS clients due to storage overhead. - google.protobuf.Any failed_configuration = 1; - - // Time of the latest failed update attempt. - google.protobuf.Timestamp last_update_attempt = 2; - - // Details about the last failed update attempt. - string details = 3; - - // This is the version of the rejected resource. - // [#not-implemented-hide:] - string version_info = 4; -} - -// This message describes the bootstrap configuration that Envoy was started with. This includes -// any CLI overrides that were merged. 
Bootstrap configuration information can be used to recreate -// the static portions of an Envoy configuration by reusing the output as the bootstrap -// configuration for another Envoy. -message BootstrapConfigDump { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.BootstrapConfigDump"; - - config.bootstrap.v4alpha.Bootstrap bootstrap = 1; - - // The timestamp when the BootstrapConfig was last updated. - google.protobuf.Timestamp last_updated = 2; -} - -// Envoy's listener manager fills this message with all currently known listeners. Listener -// configuration information can be used to recreate an Envoy configuration by populating all -// listeners as static listeners or by returning them in a LDS response. -message ListenersConfigDump { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ListenersConfigDump"; - - // Describes a statically loaded listener. - message StaticListener { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v3.ListenersConfigDump.StaticListener"; - - // The listener config. - google.protobuf.Any listener = 1; - - // The timestamp when the Listener was last successfully updated. - google.protobuf.Timestamp last_updated = 2; - } - - message DynamicListenerState { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v3.ListenersConfigDump.DynamicListenerState"; - - // This is the per-resource version information. This version is currently taken from the - // :ref:`version_info ` field at the time - // that the listener was loaded. In the future, discrete per-listener versions may be supported - // by the API. - string version_info = 1; - - // The listener config. - google.protobuf.Any listener = 2; - - // The timestamp when the Listener was last successfully updated. - google.protobuf.Timestamp last_updated = 3; - } - - // Describes a dynamically loaded listener via the LDS API. 
- // [#next-free-field: 7] - message DynamicListener { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v3.ListenersConfigDump.DynamicListener"; - - // The name or unique id of this listener, pulled from the DynamicListenerState config. - string name = 1; - - // The listener state for any active listener by this name. - // These are listeners that are available to service data plane traffic. - DynamicListenerState active_state = 2; - - // The listener state for any warming listener by this name. - // These are listeners that are currently undergoing warming in preparation to service data - // plane traffic. Note that if attempting to recreate an Envoy configuration from a - // configuration dump, the warming listeners should generally be discarded. - DynamicListenerState warming_state = 3; - - // The listener state for any draining listener by this name. - // These are listeners that are currently undergoing draining in preparation to stop servicing - // data plane traffic. Note that if attempting to recreate an Envoy configuration from a - // configuration dump, the draining listeners should generally be discarded. - DynamicListenerState draining_state = 4; - - // Set if the last update failed, cleared after the next successful update. - // The *error_state* field contains the rejected version of this particular - // resource along with the reason and timestamp. For successfully updated or - // acknowledged resource, this field should be empty. - UpdateFailureState error_state = 5; - - // The client status of this resource. - // [#not-implemented-hide:] - ClientResourceStatus client_status = 6; - } - - // This is the :ref:`version_info ` in the - // last processed LDS discovery response. If there are only static bootstrap listeners, this field - // will be "". - string version_info = 1; - - // The statically loaded listener configs. 
- repeated StaticListener static_listeners = 2; - - // State for any warming, active, or draining listeners. - repeated DynamicListener dynamic_listeners = 3; -} - -// Envoy's cluster manager fills this message with all currently known clusters. Cluster -// configuration information can be used to recreate an Envoy configuration by populating all -// clusters as static clusters or by returning them in a CDS response. -message ClustersConfigDump { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ClustersConfigDump"; - - // Describes a statically loaded cluster. - message StaticCluster { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v3.ClustersConfigDump.StaticCluster"; - - // The cluster config. - google.protobuf.Any cluster = 1; - - // The timestamp when the Cluster was last updated. - google.protobuf.Timestamp last_updated = 2; - } - - // Describes a dynamically loaded cluster via the CDS API. - // [#next-free-field: 6] - message DynamicCluster { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v3.ClustersConfigDump.DynamicCluster"; - - // This is the per-resource version information. This version is currently taken from the - // :ref:`version_info ` field at the time - // that the cluster was loaded. In the future, discrete per-cluster versions may be supported by - // the API. - string version_info = 1; - - // The cluster config. - google.protobuf.Any cluster = 2; - - // The timestamp when the Cluster was last updated. - google.protobuf.Timestamp last_updated = 3; - - // Set if the last update failed, cleared after the next successful update. - // The *error_state* field contains the rejected version of this particular - // resource along with the reason and timestamp. For successfully updated or - // acknowledged resource, this field should be empty. - // [#not-implemented-hide:] - UpdateFailureState error_state = 4; - - // The client status of this resource. 
- // [#not-implemented-hide:] - ClientResourceStatus client_status = 5; - } - - // This is the :ref:`version_info ` in the - // last processed CDS discovery response. If there are only static bootstrap clusters, this field - // will be "". - string version_info = 1; - - // The statically loaded cluster configs. - repeated StaticCluster static_clusters = 2; - - // The dynamically loaded active clusters. These are clusters that are available to service - // data plane traffic. - repeated DynamicCluster dynamic_active_clusters = 3; - - // The dynamically loaded warming clusters. These are clusters that are currently undergoing - // warming in preparation to service data plane traffic. Note that if attempting to recreate an - // Envoy configuration from a configuration dump, the warming clusters should generally be - // discarded. - repeated DynamicCluster dynamic_warming_clusters = 4; -} - -// Envoy's RDS implementation fills this message with all currently loaded routes, as described by -// their RouteConfiguration objects. Static routes that are either defined in the bootstrap configuration -// or defined inline while configuring listeners are separated from those configured dynamically via RDS. -// Route configuration information can be used to recreate an Envoy configuration by populating all routes -// as static routes or by returning them in RDS responses. -message RoutesConfigDump { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.RoutesConfigDump"; - - message StaticRouteConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v3.RoutesConfigDump.StaticRouteConfig"; - - // The route config. - google.protobuf.Any route_config = 1; - - // The timestamp when the Route was last updated. 
- google.protobuf.Timestamp last_updated = 2; - } - - // [#next-free-field: 6] - message DynamicRouteConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig"; - - // This is the per-resource version information. This version is currently taken from the - // :ref:`version_info ` field at the time that - // the route configuration was loaded. - string version_info = 1; - - // The route config. - google.protobuf.Any route_config = 2; - - // The timestamp when the Route was last updated. - google.protobuf.Timestamp last_updated = 3; - - // Set if the last update failed, cleared after the next successful update. - // The *error_state* field contains the rejected version of this particular - // resource along with the reason and timestamp. For successfully updated or - // acknowledged resource, this field should be empty. - // [#not-implemented-hide:] - UpdateFailureState error_state = 4; - - // The client status of this resource. - // [#not-implemented-hide:] - ClientResourceStatus client_status = 5; - } - - // The statically loaded route configs. - repeated StaticRouteConfig static_route_configs = 2; - - // The dynamically loaded route configs. - repeated DynamicRouteConfig dynamic_route_configs = 3; -} - -// Envoy's scoped RDS implementation fills this message with all currently loaded route -// configuration scopes (defined via ScopedRouteConfigurationsSet protos). This message lists both -// the scopes defined inline with the higher order object (i.e., the HttpConnectionManager) and the -// dynamically obtained scopes via the SRDS API. 
-message ScopedRoutesConfigDump { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v3.ScopedRoutesConfigDump"; - - message InlineScopedRouteConfigs { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v3.ScopedRoutesConfigDump.InlineScopedRouteConfigs"; - - // The name assigned to the scoped route configurations. - string name = 1; - - // The scoped route configurations. - repeated google.protobuf.Any scoped_route_configs = 2; - - // The timestamp when the scoped route config set was last updated. - google.protobuf.Timestamp last_updated = 3; - } - - // [#next-free-field: 7] - message DynamicScopedRouteConfigs { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs"; - - // The name assigned to the scoped route configurations. - string name = 1; - - // This is the per-resource version information. This version is currently taken from the - // :ref:`version_info ` field at the time that - // the scoped routes configuration was loaded. - string version_info = 2; - - // The scoped route configurations. - repeated google.protobuf.Any scoped_route_configs = 3; - - // The timestamp when the scoped route config set was last updated. - google.protobuf.Timestamp last_updated = 4; - - // Set if the last update failed, cleared after the next successful update. - // The *error_state* field contains the rejected version of this particular - // resource along with the reason and timestamp. For successfully updated or - // acknowledged resource, this field should be empty. - // [#not-implemented-hide:] - UpdateFailureState error_state = 5; - - // The client status of this resource. - // [#not-implemented-hide:] - ClientResourceStatus client_status = 6; - } - - // The statically loaded scoped route configs. - repeated InlineScopedRouteConfigs inline_scoped_route_configs = 1; - - // The dynamically loaded scoped route configs. 
- repeated DynamicScopedRouteConfigs dynamic_scoped_route_configs = 2; -} - -// Envoys SDS implementation fills this message with all secrets fetched dynamically via SDS. -message SecretsConfigDump { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.SecretsConfigDump"; - - // DynamicSecret contains secret information fetched via SDS. - // [#next-free-field: 7] - message DynamicSecret { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v3.SecretsConfigDump.DynamicSecret"; - - // The name assigned to the secret. - string name = 1; - - // This is the per-resource version information. - string version_info = 2; - - // The timestamp when the secret was last updated. - google.protobuf.Timestamp last_updated = 3; - - // The actual secret information. - // Security sensitive information is redacted (replaced with "[redacted]") for - // private keys and passwords in TLS certificates. - google.protobuf.Any secret = 4; - - // Set if the last update failed, cleared after the next successful update. - // The *error_state* field contains the rejected version of this particular - // resource along with the reason and timestamp. For successfully updated or - // acknowledged resource, this field should be empty. - // [#not-implemented-hide:] - UpdateFailureState error_state = 5; - - // The client status of this resource. - // [#not-implemented-hide:] - ClientResourceStatus client_status = 6; - } - - // StaticSecret specifies statically loaded secret in bootstrap. - message StaticSecret { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v3.SecretsConfigDump.StaticSecret"; - - // The name assigned to the secret. - string name = 1; - - // The timestamp when the secret was last updated. - google.protobuf.Timestamp last_updated = 2; - - // The actual secret information. - // Security sensitive information is redacted (replaced with "[redacted]") for - // private keys and passwords in TLS certificates. 
- google.protobuf.Any secret = 3; - } - - // The statically loaded secrets. - repeated StaticSecret static_secrets = 1; - - // The dynamically loaded active secrets. These are secrets that are available to service - // clusters or listeners. - repeated DynamicSecret dynamic_active_secrets = 2; - - // The dynamically loaded warming secrets. These are secrets that are currently undergoing - // warming in preparation to service clusters or listeners. - repeated DynamicSecret dynamic_warming_secrets = 3; -} - -// Envoy's admin fill this message with all currently known endpoints. Endpoint -// configuration information can be used to recreate an Envoy configuration by populating all -// endpoints as static endpoints or by returning them in an EDS response. -message EndpointsConfigDump { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.EndpointsConfigDump"; - - message StaticEndpointConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v3.EndpointsConfigDump.StaticEndpointConfig"; - - // The endpoint config. - google.protobuf.Any endpoint_config = 1; - - // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. - google.protobuf.Timestamp last_updated = 2; - } - - // [#next-free-field: 6] - message DynamicEndpointConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig"; - - // [#not-implemented-hide:] This is the per-resource version information. This version is currently taken from the - // :ref:`version_info ` field at the time that - // the endpoint configuration was loaded. - string version_info = 1; - - // The endpoint config. - google.protobuf.Any endpoint_config = 2; - - // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. - google.protobuf.Timestamp last_updated = 3; - - // Set if the last update failed, cleared after the next successful update. 
- // The *error_state* field contains the rejected version of this particular - // resource along with the reason and timestamp. For successfully updated or - // acknowledged resource, this field should be empty. - // [#not-implemented-hide:] - UpdateFailureState error_state = 4; - - // The client status of this resource. - // [#not-implemented-hide:] - ClientResourceStatus client_status = 5; - } - - // The statically loaded endpoint configs. - repeated StaticEndpointConfig static_endpoint_configs = 2; - - // The dynamically loaded endpoint configs. - repeated DynamicEndpointConfig dynamic_endpoint_configs = 3; -} diff --git a/generated_api_shadow/envoy/admin/v4alpha/init_dump.proto b/generated_api_shadow/envoy/admin/v4alpha/init_dump.proto deleted file mode 100644 index 81c423e52024d..0000000000000 --- a/generated_api_shadow/envoy/admin/v4alpha/init_dump.proto +++ /dev/null @@ -1,37 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v4alpha; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v4alpha"; -option java_outer_classname = "InitDumpProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: InitDump] - -// Dumps of unready targets of envoy init managers. Envoy's admin fills this message with init managers, -// which provides the information of their unready targets. -// The :ref:`/init_dump ` will dump all unready targets information. -message UnreadyTargetsDumps { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.UnreadyTargetsDumps"; - - // Message of unready targets information of an init manager. - message UnreadyTargetsDump { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v3.UnreadyTargetsDumps.UnreadyTargetsDump"; - - // Name of the init manager. Example: "init_manager_xxx". 
- string name = 1; - - // Names of unready targets of the init manager. Example: "target_xxx". - repeated string target_names = 2; - } - - // You can choose specific component to dump unready targets with mask query parameter. - // See :ref:`/init_dump?mask={} ` for more information. - // The dumps of unready targets of all init managers. - repeated UnreadyTargetsDump unready_targets_dumps = 1; -} diff --git a/generated_api_shadow/envoy/admin/v4alpha/listeners.proto b/generated_api_shadow/envoy/admin/v4alpha/listeners.proto deleted file mode 100644 index 89bdc4c5bbf8d..0000000000000 --- a/generated_api_shadow/envoy/admin/v4alpha/listeners.proto +++ /dev/null @@ -1,36 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v4alpha; - -import "envoy/config/core/v4alpha/address.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v4alpha"; -option java_outer_classname = "ListenersProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Listeners] - -// Admin endpoint uses this wrapper for `/listeners` to display listener status information. -// See :ref:`/listeners ` for more information. -message Listeners { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.Listeners"; - - // List of listener statuses. - repeated ListenerStatus listener_statuses = 1; -} - -// Details an individual listener's current status. -message ListenerStatus { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ListenerStatus"; - - // Name of the listener - string name = 1; - - // The actual local address that the listener is listening on. If a listener was configured - // to listen on port 0, then this address has the port that was allocated by the OS. 
- config.core.v4alpha.Address local_address = 2; -} diff --git a/generated_api_shadow/envoy/admin/v4alpha/memory.proto b/generated_api_shadow/envoy/admin/v4alpha/memory.proto deleted file mode 100644 index d2f0b57229ce8..0000000000000 --- a/generated_api_shadow/envoy/admin/v4alpha/memory.proto +++ /dev/null @@ -1,47 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v4alpha; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v4alpha"; -option java_outer_classname = "MemoryProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Memory] - -// Proto representation of the internal memory consumption of an Envoy instance. These represent -// values extracted from an internal TCMalloc instance. For more information, see the section of the -// docs entitled ["Generic Tcmalloc Status"](https://gperftools.github.io/gperftools/tcmalloc.html). -// [#next-free-field: 7] -message Memory { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.Memory"; - - // The number of bytes allocated by the heap for Envoy. This is an alias for - // `generic.current_allocated_bytes`. - uint64 allocated = 1; - - // The number of bytes reserved by the heap but not necessarily allocated. This is an alias for - // `generic.heap_size`. - uint64 heap_size = 2; - - // The number of bytes in free, unmapped pages in the page heap. These bytes always count towards - // virtual memory usage, and depending on the OS, typically do not count towards physical memory - // usage. This is an alias for `tcmalloc.pageheap_unmapped_bytes`. - uint64 pageheap_unmapped = 3; - - // The number of bytes in free, mapped pages in the page heap. 
These bytes always count towards - // virtual memory usage, and unless the underlying memory is swapped out by the OS, they also - // count towards physical memory usage. This is an alias for `tcmalloc.pageheap_free_bytes`. - uint64 pageheap_free = 4; - - // The amount of memory used by the TCMalloc thread caches (for small objects). This is an alias - // for `tcmalloc.current_total_thread_cache_bytes`. - uint64 total_thread_cache = 5; - - // The number of bytes of the physical memory usage by the allocator. This is an alias for - // `generic.total_physical_bytes`. - uint64 total_physical_bytes = 6; -} diff --git a/generated_api_shadow/envoy/admin/v4alpha/metrics.proto b/generated_api_shadow/envoy/admin/v4alpha/metrics.proto deleted file mode 100644 index 78613320038b7..0000000000000 --- a/generated_api_shadow/envoy/admin/v4alpha/metrics.proto +++ /dev/null @@ -1,32 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v4alpha; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v4alpha"; -option java_outer_classname = "MetricsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Metrics] - -// Proto representation of an Envoy Counter or Gauge value. -message SimpleMetric { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.SimpleMetric"; - - enum Type { - COUNTER = 0; - GAUGE = 1; - } - - // Type of the metric represented. - Type type = 1; - - // Current metric value. - uint64 value = 2; - - // Name of the metric. 
- string name = 3; -} diff --git a/generated_api_shadow/envoy/admin/v4alpha/mutex_stats.proto b/generated_api_shadow/envoy/admin/v4alpha/mutex_stats.proto deleted file mode 100644 index 6f9fcd548cc04..0000000000000 --- a/generated_api_shadow/envoy/admin/v4alpha/mutex_stats.proto +++ /dev/null @@ -1,33 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v4alpha; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v4alpha"; -option java_outer_classname = "MutexStatsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: MutexStats] - -// Proto representation of the statistics collected upon absl::Mutex contention, if Envoy is run -// under :option:`--enable-mutex-tracing`. For more information, see the `absl::Mutex` -// [docs](https://abseil.io/about/design/mutex#extra-features). -// -// *NB*: The wait cycles below are measured by `absl::base_internal::CycleClock`, and may not -// correspond to core clock frequency. For more information, see the `CycleClock` -// [docs](https://github.com/abseil/abseil-cpp/blob/master/absl/base/internal/cycleclock.h). -message MutexStats { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.MutexStats"; - - // The number of individual mutex contentions which have occurred since startup. - uint64 num_contentions = 1; - - // The length of the current contention wait cycle. - uint64 current_wait_cycles = 2; - - // The lifetime total of all contention wait cycles. 
- uint64 lifetime_wait_cycles = 3; -} diff --git a/generated_api_shadow/envoy/admin/v4alpha/server_info.proto b/generated_api_shadow/envoy/admin/v4alpha/server_info.proto deleted file mode 100644 index 18e59c92b0eff..0000000000000 --- a/generated_api_shadow/envoy/admin/v4alpha/server_info.proto +++ /dev/null @@ -1,194 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v4alpha"; -option java_outer_classname = "ServerInfoProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Server State] - -// Proto representation of the value returned by /server_info, containing -// server version/server status information. -// [#next-free-field: 8] -message ServerInfo { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ServerInfo"; - - enum State { - // Server is live and serving traffic. - LIVE = 0; - - // Server is draining listeners in response to external health checks failing. - DRAINING = 1; - - // Server has not yet completed cluster manager initialization. - PRE_INITIALIZING = 2; - - // Server is running the cluster manager initialization callbacks (e.g., RDS). - INITIALIZING = 3; - } - - // Server version. - string version = 1; - - // State of the server. - State state = 2; - - // Uptime since current epoch was started. - google.protobuf.Duration uptime_current_epoch = 3; - - // Uptime since the start of the first epoch. - google.protobuf.Duration uptime_all_epochs = 4; - - // Hot restart version. - string hot_restart_version = 5; - - // Command line options the server is currently running with. - CommandLineOptions command_line_options = 6; - - // Populated node identity of this server. 
- config.core.v4alpha.Node node = 7; -} - -// [#next-free-field: 38] -message CommandLineOptions { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.CommandLineOptions"; - - enum IpVersion { - v4 = 0; - v6 = 1; - } - - enum Mode { - // Validate configs and then serve traffic normally. - Serve = 0; - - // Validate configs and exit. - Validate = 1; - - // Completely load and initialize the config, and then exit without running the listener loop. - InitOnly = 2; - } - - enum DrainStrategy { - // Gradually discourage connections over the course of the drain period. - Gradual = 0; - - // Discourage all connections for the duration of the drain sequence. - Immediate = 1; - } - - reserved 12, 20, 21; - - reserved "max_stats", "max_obj_name_len"; - - // See :option:`--base-id` for details. - uint64 base_id = 1; - - // See :option:`--use-dynamic-base-id` for details. - bool use_dynamic_base_id = 31; - - // See :option:`--base-id-path` for details. - string base_id_path = 32; - - // See :option:`--concurrency` for details. - uint32 concurrency = 2; - - // See :option:`--config-path` for details. - string config_path = 3; - - // See :option:`--config-yaml` for details. - string config_yaml = 4; - - // See :option:`--allow-unknown-static-fields` for details. - bool allow_unknown_static_fields = 5; - - // See :option:`--reject-unknown-dynamic-fields` for details. - bool reject_unknown_dynamic_fields = 26; - - // See :option:`--ignore-unknown-dynamic-fields` for details. - bool ignore_unknown_dynamic_fields = 30; - - // See :option:`--admin-address-path` for details. - string admin_address_path = 6; - - // See :option:`--local-address-ip-version` for details. - IpVersion local_address_ip_version = 7; - - // See :option:`--log-level` for details. - string log_level = 8; - - // See :option:`--component-log-level` for details. - string component_log_level = 9; - - // See :option:`--log-format` for details. 
- string log_format = 10; - - // See :option:`--log-format-escaped` for details. - bool log_format_escaped = 27; - - // See :option:`--log-path` for details. - string log_path = 11; - - // See :option:`--service-cluster` for details. - string service_cluster = 13; - - // See :option:`--service-node` for details. - string service_node = 14; - - // See :option:`--service-zone` for details. - string service_zone = 15; - - // See :option:`--file-flush-interval-msec` for details. - google.protobuf.Duration file_flush_interval = 16; - - // See :option:`--drain-time-s` for details. - google.protobuf.Duration drain_time = 17; - - // See :option:`--drain-strategy` for details. - DrainStrategy drain_strategy = 33; - - // See :option:`--parent-shutdown-time-s` for details. - google.protobuf.Duration parent_shutdown_time = 18; - - // See :option:`--mode` for details. - Mode mode = 19; - - // See :option:`--disable-hot-restart` for details. - bool disable_hot_restart = 22; - - // See :option:`--enable-mutex-tracing` for details. - bool enable_mutex_tracing = 23; - - // See :option:`--restart-epoch` for details. - uint32 restart_epoch = 24; - - // See :option:`--cpuset-threads` for details. - bool cpuset_threads = 25; - - // See :option:`--disable-extensions` for details. - repeated string disabled_extensions = 28; - - // See :option:`--bootstrap-version` for details. - uint32 bootstrap_version = 29; - - // See :option:`--enable-fine-grain-logging` for details. - bool enable_fine_grain_logging = 34; - - // See :option:`--socket-path` for details. - string socket_path = 35; - - // See :option:`--socket-mode` for details. - uint32 socket_mode = 36; - - // See :option:`--enable-core-dump` for details. 
- bool enable_core_dump = 37; -} diff --git a/generated_api_shadow/envoy/admin/v4alpha/tap.proto b/generated_api_shadow/envoy/admin/v4alpha/tap.proto deleted file mode 100644 index e892593804188..0000000000000 --- a/generated_api_shadow/envoy/admin/v4alpha/tap.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v4alpha; - -import "envoy/config/tap/v4alpha/common.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v4alpha"; -option java_outer_classname = "TapProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Tap] - -// The /tap admin request body that is used to configure an active tap session. -message TapRequest { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.TapRequest"; - - // The opaque configuration ID used to match the configuration to a loaded extension. - // A tap extension configures a similar opaque ID that is used to match. - string config_id = 1 [(validate.rules).string = {min_len: 1}]; - - // The tap configuration to load. - config.tap.v4alpha.TapConfig tap_config = 2 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/config/accesslog/v4alpha/BUILD b/generated_api_shadow/envoy/config/accesslog/v4alpha/BUILD deleted file mode 100644 index 68064d3b08d1e..0000000000000 --- a/generated_api_shadow/envoy/config/accesslog/v4alpha/BUILD +++ /dev/null @@ -1,16 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/accesslog/v3:pkg", - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/route/v4alpha:pkg", - "//envoy/type/matcher/v4alpha:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto b/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto deleted file mode 100644 index 3e0c7f53598cc..0000000000000 --- a/generated_api_shadow/envoy/config/accesslog/v4alpha/accesslog.proto +++ /dev/null @@ -1,326 +0,0 @@ -syntax = "proto3"; - -package envoy.config.accesslog.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/route/v4alpha/route_components.proto"; -import "envoy/type/matcher/v4alpha/metadata.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.accesslog.v4alpha"; -option java_outer_classname = "AccesslogProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Common access log types] - -message AccessLog { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v3.AccessLog"; - - reserved 3; - - reserved "config"; - - // The name of the access log extension to instantiate. - // The name must match one of the compiled in loggers. - // See the :ref:`extensions listed in typed_config below ` for the default list of available loggers. - string name = 1; - - // Filter which is used to determine if the access log needs to be written. 
- AccessLogFilter filter = 2; - - // Custom configuration that must be set according to the access logger extension being instantiated. - // [#extension-category: envoy.access_loggers] - oneof config_type { - google.protobuf.Any typed_config = 4; - } -} - -// [#next-free-field: 13] -message AccessLogFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v3.AccessLogFilter"; - - oneof filter_specifier { - option (validate.required) = true; - - // Status code filter. - StatusCodeFilter status_code_filter = 1; - - // Duration filter. - DurationFilter duration_filter = 2; - - // Not health check filter. - NotHealthCheckFilter not_health_check_filter = 3; - - // Traceable filter. - TraceableFilter traceable_filter = 4; - - // Runtime filter. - RuntimeFilter runtime_filter = 5; - - // And filter. - AndFilter and_filter = 6; - - // Or filter. - OrFilter or_filter = 7; - - // Header filter. - HeaderFilter header_filter = 8; - - // Response flag filter. - ResponseFlagFilter response_flag_filter = 9; - - // gRPC status filter. - GrpcStatusFilter grpc_status_filter = 10; - - // Extension filter. - ExtensionFilter extension_filter = 11; - - // Metadata Filter - MetadataFilter metadata_filter = 12; - } -} - -// Filter on an integer comparison. -message ComparisonFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v3.ComparisonFilter"; - - enum Op { - // = - EQ = 0; - - // >= - GE = 1; - - // <= - LE = 2; - } - - // Comparison operator. - Op op = 1 [(validate.rules).enum = {defined_only: true}]; - - // Value to compare against. - core.v4alpha.RuntimeUInt32 value = 2; -} - -// Filters on HTTP response/status code. -message StatusCodeFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v3.StatusCodeFilter"; - - // Comparison. 
- ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}]; -} - -// Filters on total request duration in milliseconds. -message DurationFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v3.DurationFilter"; - - // Comparison. - ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}]; -} - -// Filters for requests that are not health check requests. A health check -// request is marked by the health check filter. -message NotHealthCheckFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v3.NotHealthCheckFilter"; -} - -// Filters for requests that are traceable. See the tracing overview for more -// information on how a request becomes traceable. -message TraceableFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v3.TraceableFilter"; -} - -// Filters for random sampling of requests. -message RuntimeFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v3.RuntimeFilter"; - - // Runtime key to get an optional overridden numerator for use in the - // *percent_sampled* field. If found in runtime, this value will replace the - // default numerator. - string runtime_key = 1 [(validate.rules).string = {min_len: 1}]; - - // The default sampling percentage. If not specified, defaults to 0% with - // denominator of 100. - type.v3.FractionalPercent percent_sampled = 2; - - // By default, sampling pivots on the header - // :ref:`x-request-id` being - // present. If :ref:`x-request-id` - // is present, the filter will consistently sample across multiple hosts based - // on the runtime key value and the value extracted from - // :ref:`x-request-id`. If it is - // missing, or *use_independent_randomness* is set to true, the filter will - // randomly sample based on the runtime key value alone. 
- // *use_independent_randomness* can be used for logging kill switches within - // complex nested :ref:`AndFilter - // ` and :ref:`OrFilter - // ` blocks that are easier to - // reason about from a probability perspective (i.e., setting to true will - // cause the filter to behave like an independent random variable when - // composed within logical operator filters). - bool use_independent_randomness = 3; -} - -// Performs a logical “and” operation on the result of each filter in filters. -// Filters are evaluated sequentially and if one of them returns false, the -// filter returns false immediately. -message AndFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v3.AndFilter"; - - repeated AccessLogFilter filters = 1 [(validate.rules).repeated = {min_items: 2}]; -} - -// Performs a logical “or” operation on the result of each individual filter. -// Filters are evaluated sequentially and if one of them returns true, the -// filter returns true immediately. -message OrFilter { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v3.OrFilter"; - - repeated AccessLogFilter filters = 2 [(validate.rules).repeated = {min_items: 2}]; -} - -// Filters requests based on the presence or value of a request header. -message HeaderFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v3.HeaderFilter"; - - // Only requests with a header which matches the specified HeaderMatcher will - // pass the filter check. - route.v4alpha.HeaderMatcher header = 1 [(validate.rules).message = {required: true}]; -} - -// Filters requests that received responses with an Envoy response flag set. -// A list of the response flags can be found -// in the access log formatter -// :ref:`documentation`. 
-message ResponseFlagFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v3.ResponseFlagFilter"; - - // Only responses with the any of the flags listed in this field will be - // logged. This field is optional. If it is not specified, then any response - // flag will pass the filter check. - repeated string flags = 1 [(validate.rules).repeated = { - items { - string { - in: "LH" - in: "UH" - in: "UT" - in: "LR" - in: "UR" - in: "UF" - in: "UC" - in: "UO" - in: "NR" - in: "DI" - in: "FI" - in: "RL" - in: "UAEX" - in: "RLSE" - in: "DC" - in: "URX" - in: "SI" - in: "IH" - in: "DPE" - in: "UMSDR" - in: "RFCF" - in: "NFCF" - in: "DT" - in: "UPE" - in: "NC" - in: "OM" - } - } - }]; -} - -// Filters gRPC requests based on their response status. If a gRPC status is not -// provided, the filter will infer the status from the HTTP status code. -message GrpcStatusFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v3.GrpcStatusFilter"; - - enum Status { - OK = 0; - CANCELED = 1; - UNKNOWN = 2; - INVALID_ARGUMENT = 3; - DEADLINE_EXCEEDED = 4; - NOT_FOUND = 5; - ALREADY_EXISTS = 6; - PERMISSION_DENIED = 7; - RESOURCE_EXHAUSTED = 8; - FAILED_PRECONDITION = 9; - ABORTED = 10; - OUT_OF_RANGE = 11; - UNIMPLEMENTED = 12; - INTERNAL = 13; - UNAVAILABLE = 14; - DATA_LOSS = 15; - UNAUTHENTICATED = 16; - } - - // Logs only responses that have any one of the gRPC statuses in this field. - repeated Status statuses = 1 [(validate.rules).repeated = {items {enum {defined_only: true}}}]; - - // If included and set to true, the filter will instead block all responses - // with a gRPC status or inferred gRPC status enumerated in statuses, and - // allow all other responses. - bool exclude = 2; -} - -// Filters based on matching dynamic metadata. 
-// If the matcher path and key correspond to an existing key in dynamic -// metadata, the request is logged only if the matcher value is equal to the -// metadata value. If the matcher path and key *do not* correspond to an -// existing key in dynamic metadata, the request is logged only if -// match_if_key_not_found is "true" or unset. -message MetadataFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v3.MetadataFilter"; - - // Matcher to check metadata for specified value. For example, to match on the - // access_log_hint metadata, set the filter to "envoy.common" and the path to - // "access_log_hint", and the value to "true". - type.matcher.v4alpha.MetadataMatcher matcher = 1; - - // Default result if the key does not exist in dynamic metadata: if unset or - // true, then log; if false, then don't log. - google.protobuf.BoolValue match_if_key_not_found = 2; -} - -// Extension filter is statically registered at runtime. -message ExtensionFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v3.ExtensionFilter"; - - reserved 2; - - reserved "config"; - - // The name of the filter implementation to instantiate. The name must - // match a statically registered filter. - string name = 1; - - // Custom configuration that depends on the filter being instantiated. - oneof config_type { - google.protobuf.Any typed_config = 3; - } -} diff --git a/generated_api_shadow/envoy/config/bootstrap/v4alpha/BUILD b/generated_api_shadow/envoy/config/bootstrap/v4alpha/BUILD deleted file mode 100644 index b1604d76d220f..0000000000000 --- a/generated_api_shadow/envoy/config/bootstrap/v4alpha/BUILD +++ /dev/null @@ -1,22 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/accesslog/v4alpha:pkg", - "//envoy/config/bootstrap/v3:pkg", - "//envoy/config/cluster/v4alpha:pkg", - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/listener/v4alpha:pkg", - "//envoy/config/metrics/v4alpha:pkg", - "//envoy/config/overload/v3:pkg", - "//envoy/config/trace/v4alpha:pkg", - "//envoy/extensions/transport_sockets/tls/v4alpha:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto b/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto deleted file mode 100644 index b21acabe686fc..0000000000000 --- a/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto +++ /dev/null @@ -1,652 +0,0 @@ -syntax = "proto3"; - -package envoy.config.bootstrap.v4alpha; - -import "envoy/config/accesslog/v4alpha/accesslog.proto"; -import "envoy/config/cluster/v4alpha/cluster.proto"; -import "envoy/config/core/v4alpha/address.proto"; -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/config_source.proto"; -import "envoy/config/core/v4alpha/event_service_config.proto"; -import "envoy/config/core/v4alpha/extension.proto"; -import "envoy/config/core/v4alpha/resolver.proto"; -import "envoy/config/core/v4alpha/socket_option.proto"; -import "envoy/config/listener/v4alpha/listener.proto"; -import "envoy/config/metrics/v4alpha/stats.proto"; -import "envoy/config/overload/v3/overload.proto"; -import "envoy/config/trace/v4alpha/http_tracer.proto"; -import "envoy/extensions/transport_sockets/tls/v4alpha/secret.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; 
-import "udpa/annotations/security.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.bootstrap.v4alpha"; -option java_outer_classname = "BootstrapProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Bootstrap] -// This proto is supplied via the :option:`-c` CLI flag and acts as the root -// of the Envoy v3 configuration. See the :ref:`v3 configuration overview -// ` for more detail. - -// Bootstrap :ref:`configuration overview `. -// [#next-free-field: 33] -message Bootstrap { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v3.Bootstrap"; - - message StaticResources { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v3.Bootstrap.StaticResources"; - - // Static :ref:`Listeners `. These listeners are - // available regardless of LDS configuration. - repeated listener.v4alpha.Listener listeners = 1; - - // If a network based configuration source is specified for :ref:`cds_config - // `, it's necessary - // to have some initial cluster definitions available to allow Envoy to know - // how to speak to the management server. These cluster definitions may not - // use :ref:`EDS ` (i.e. they should be static - // IP or DNS-based). - repeated cluster.v4alpha.Cluster clusters = 2; - - // These static secrets can be used by :ref:`SdsSecretConfig - // ` - repeated envoy.extensions.transport_sockets.tls.v4alpha.Secret secrets = 3; - } - - // [#next-free-field: 7] - message DynamicResources { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v3.Bootstrap.DynamicResources"; - - reserved 4; - - // All :ref:`Listeners ` are provided by a single - // :ref:`LDS ` configuration source. 
- core.v4alpha.ConfigSource lds_config = 1; - - // xdstp:// resource locator for listener collection. - // [#not-implemented-hide:] - string lds_resources_locator = 5; - - // All post-bootstrap :ref:`Cluster ` definitions are - // provided by a single :ref:`CDS ` - // configuration source. - core.v4alpha.ConfigSource cds_config = 2; - - // xdstp:// resource locator for cluster collection. - // [#not-implemented-hide:] - string cds_resources_locator = 6; - - // A single :ref:`ADS ` source may be optionally - // specified. This must have :ref:`api_type - // ` :ref:`GRPC - // `. Only - // :ref:`ConfigSources ` that have - // the :ref:`ads ` field set will be - // streamed on the ADS channel. - core.v4alpha.ApiConfigSource ads_config = 3; - } - - reserved 10, 11; - - reserved "runtime"; - - // Node identity to present to the management server and for instance - // identification purposes (e.g. in generated headers). - core.v4alpha.Node node = 1; - - // A list of :ref:`Node ` field names - // that will be included in the context parameters of the effective - // xdstp:// URL that is sent in a discovery request when resource - // locators are used for LDS/CDS. Any non-string field will have its JSON - // encoding set as the context parameter value, with the exception of - // metadata, which will be flattened (see example below). The supported field - // names are: - // - "cluster" - // - "id" - // - "locality.region" - // - "locality.sub_zone" - // - "locality.zone" - // - "metadata" - // - "user_agent_build_version.metadata" - // - "user_agent_build_version.version" - // - "user_agent_name" - // - "user_agent_version" - // - // The node context parameters act as a base layer dictionary for the context - // parameters (i.e. more specific resource specific context parameters will - // override). Field names will be prefixed with “udpa.node.” when included in - // context parameters. 
- // - // For example, if node_context_params is ``["user_agent_name", "metadata"]``, - // the implied context parameters might be:: - // - // node.user_agent_name: "envoy" - // node.metadata.foo: "{\"bar\": \"baz\"}" - // node.metadata.some: "42" - // node.metadata.thing: "\"thing\"" - // - // [#not-implemented-hide:] - repeated string node_context_params = 26; - - // Statically specified resources. - StaticResources static_resources = 2; - - // xDS configuration sources. - DynamicResources dynamic_resources = 3; - - // Configuration for the cluster manager which owns all upstream clusters - // within the server. - ClusterManager cluster_manager = 4; - - // Health discovery service config option. - // (:ref:`core.ApiConfigSource `) - core.v4alpha.ApiConfigSource hds_config = 14; - - // Optional file system path to search for startup flag files. - string flags_path = 5; - - // Optional set of stats sinks. - repeated metrics.v4alpha.StatsSink stats_sinks = 6; - - // Configuration for internal processing of stats. - metrics.v4alpha.StatsConfig stats_config = 13; - - oneof stats_flush { - // Optional duration between flushes to configured stats sinks. For - // performance reasons Envoy latches counters and only flushes counters and - // gauges at a periodic interval. If not specified the default is 5000ms (5 - // seconds). Only one of `stats_flush_interval` or `stats_flush_on_admin` - // can be set. - // Duration must be at least 1ms and at most 5 min. - google.protobuf.Duration stats_flush_interval = 7 [(validate.rules).duration = { - lt {seconds: 300} - gte {nanos: 1000000} - }]; - - // Flush stats to sinks only when queried for on the admin interface. If set, - // a flush timer is not created. Only one of `stats_flush_on_admin` or - // `stats_flush_interval` can be set. - bool stats_flush_on_admin = 29 [(validate.rules).bool = {const: true}]; - } - - // Optional watchdog configuration. - // This is for a single watchdog configuration for the entire system. 
- // Deprecated in favor of *watchdogs* which has finer granularity. - Watchdog hidden_envoy_deprecated_watchdog = 8 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Optional watchdogs configuration. - // This is used for specifying different watchdogs for the different subsystems. - // [#extension-category: envoy.guarddog_actions] - Watchdogs watchdogs = 27; - - // Configuration for an external tracing provider. - // - // .. attention:: - // This field has been deprecated in favor of :ref:`HttpConnectionManager.Tracing.provider - // `. - trace.v4alpha.Tracing hidden_envoy_deprecated_tracing = 9 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Configuration for the runtime configuration provider. If not - // specified, a “null” provider will be used which will result in all defaults - // being used. - LayeredRuntime layered_runtime = 17; - - // Configuration for the local administration HTTP server. - Admin admin = 12; - - // Optional overload manager configuration. - overload.v3.OverloadManager overload_manager = 15 [ - (udpa.annotations.security).configure_for_untrusted_downstream = true, - (udpa.annotations.security).configure_for_untrusted_upstream = true - ]; - - // Enable :ref:`stats for event dispatcher `, defaults to false. - // Note that this records a value for each iteration of the event loop on every thread. This - // should normally be minimal overhead, but when using - // :ref:`statsd `, it will send each observed value - // over the wire individually because the statsd protocol doesn't have any way to represent a - // histogram summary. Be aware that this can be a very large volume of data. - bool enable_dispatcher_stats = 16; - - // Optional string which will be used in lieu of x-envoy in prefixing headers. - // - // For example, if this string is present and set to X-Foo, then x-envoy-retry-on will be - // transformed into x-foo-retry-on etc. 
- // - // Note this applies to the headers Envoy will generate, the headers Envoy will sanitize, and the - // headers Envoy will trust for core code and core extensions only. Be VERY careful making - // changes to this string, especially in multi-layer Envoy deployments or deployments using - // extensions which are not upstream. - string header_prefix = 18; - - // Optional proxy version which will be used to set the value of :ref:`server.version statistic - // ` if specified. Envoy will not process this value, it will be sent as is to - // :ref:`stats sinks `. - google.protobuf.UInt64Value stats_server_version_override = 19; - - // Always use TCP queries instead of UDP queries for DNS lookups. - // This may be overridden on a per-cluster basis in cds_config, - // when :ref:`dns_resolvers ` and - // :ref:`use_tcp_for_dns_lookups ` are - // specified. - // Setting this value causes failure if the - // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during - // server startup. Apple' API only uses UDP for DNS resolution. - // This field is deprecated in favor of *dns_resolution_config* - // which aggregates all of the DNS resolver configuration in a single message. - bool hidden_envoy_deprecated_use_tcp_for_dns_lookups = 20 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // DNS resolution configuration which includes the underlying dns resolver addresses and options. - // This may be overridden on a per-cluster basis in cds_config, when - // :ref:`dns_resolution_config ` - // is specified. - // *dns_resolution_config* will be deprecated once - // :ref:'typed_dns_resolver_config ' - // is fully supported. - core.v4alpha.DnsResolutionConfig dns_resolution_config = 30; - - // DNS resolver type configuration extension. This extension can be used to configure c-ares, apple, - // or any other DNS resolver types and the related parameters. 
- // For example, an object of :ref:`DnsResolutionConfig ` - // can be packed into this *typed_dns_resolver_config*. This configuration will replace the - // :ref:'dns_resolution_config ' - // configuration eventually. - // TODO(yanjunxiang): Investigate the deprecation plan for *dns_resolution_config*. - // During the transition period when both *dns_resolution_config* and *typed_dns_resolver_config* exists, - // this configuration is optional. - // When *typed_dns_resolver_config* is in place, Envoy will use it and ignore *dns_resolution_config*. - // When *typed_dns_resolver_config* is missing, the default behavior is in place. - // [#not-implemented-hide:] - core.v4alpha.TypedExtensionConfig typed_dns_resolver_config = 31; - - // Specifies optional bootstrap extensions to be instantiated at startup time. - // Each item contains extension specific configuration. - // [#extension-category: envoy.bootstrap] - repeated core.v4alpha.TypedExtensionConfig bootstrap_extensions = 21; - - // Specifies optional extensions instantiated at startup time and - // invoked during crash time on the request that caused the crash. - repeated FatalAction fatal_actions = 28; - - // Configuration sources that will participate in - // xdstp:// URL authority resolution. The algorithm is as - // follows: - // 1. The authority field is taken from the xdstp:// URL, call - // this *resource_authority*. - // 2. *resource_authority* is compared against the authorities in any peer - // *ConfigSource*. The peer *ConfigSource* is the configuration source - // message which would have been used unconditionally for resolution - // with opaque resource names. If there is a match with an authority, the - // peer *ConfigSource* message is used. - // 3. *resource_authority* is compared sequentially with the authorities in - // each configuration source in *config_sources*. The first *ConfigSource* - // to match wins. - // 4. 
As a fallback, if no configuration source matches, then - // *default_config_source* is used. - // 5. If *default_config_source* is not specified, resolution fails. - // [#not-implemented-hide:] - repeated core.v4alpha.ConfigSource config_sources = 22; - - // Default configuration source for xdstp:// URLs if all - // other resolution fails. - // [#not-implemented-hide:] - core.v4alpha.ConfigSource default_config_source = 23; - - // Optional overriding of default socket interface. The value must be the name of one of the - // socket interface factories initialized through a bootstrap extension - string default_socket_interface = 24; - - // Global map of CertificateProvider instances. These instances are referred to by name in the - // :ref:`CommonTlsContext.CertificateProviderInstance.instance_name - // ` - // field. - // [#not-implemented-hide:] - map certificate_provider_instances = 25; - - // Specifies a set of headers that need to be registered as inline header. This configuration - // allows users to customize the inline headers on-demand at Envoy startup without modifying - // Envoy's source code. - // - // Note that the 'set-cookie' header cannot be registered as inline header. - repeated CustomInlineHeader inline_headers = 32; -} - -// Administration interface :ref:`operations documentation -// `. -// [#next-free-field: 6] -message Admin { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Admin"; - - // Configuration for :ref:`access logs ` - // emitted by the administration server. - repeated accesslog.v4alpha.AccessLog access_log = 5; - - // The path to write the access log for the administration server. If no - // access log is desired specify ‘/dev/null’. This is only required if - // :ref:`address ` is set. - // Deprecated in favor of *access_log* which offers more options. 
- string hidden_envoy_deprecated_access_log_path = 1 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // The cpu profiler output path for the administration server. If no profile - // path is specified, the default is ‘/var/log/envoy/envoy.prof’. - string profile_path = 2; - - // The TCP address that the administration server will listen on. - // If not specified, Envoy will not start an administration server. - core.v4alpha.Address address = 3; - - // Additional socket options that may not be present in Envoy source code or - // precompiled binaries. - repeated core.v4alpha.SocketOption socket_options = 4; -} - -// Cluster manager :ref:`architecture overview `. -message ClusterManager { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v3.ClusterManager"; - - message OutlierDetection { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v3.ClusterManager.OutlierDetection"; - - // Specifies the path to the outlier event log. - string event_log_path = 1; - - // [#not-implemented-hide:] - // The gRPC service for the outlier detection event service. - // If empty, outlier detection events won't be sent to a remote endpoint. - core.v4alpha.EventServiceConfig event_service = 2; - } - - // Name of the local cluster (i.e., the cluster that owns the Envoy running - // this configuration). In order to enable :ref:`zone aware routing - // ` this option must be set. - // If *local_cluster_name* is defined then :ref:`clusters - // ` must be defined in the :ref:`Bootstrap - // static cluster resources - // `. This is unrelated to - // the :option:`--service-cluster` option which does not `affect zone aware - // routing `_. - string local_cluster_name = 1; - - // Optional global configuration for outlier detection. - OutlierDetection outlier_detection = 2; - - // Optional configuration used to bind newly established upstream connections. 
- // This may be overridden on a per-cluster basis by upstream_bind_config in the cds_config. - core.v4alpha.BindConfig upstream_bind_config = 3; - - // A management server endpoint to stream load stats to via - // *StreamLoadStats*. This must have :ref:`api_type - // ` :ref:`GRPC - // `. - core.v4alpha.ApiConfigSource load_stats_config = 4; -} - -// Allows you to specify different watchdog configs for different subsystems. -// This allows finer tuned policies for the watchdog. If a subsystem is omitted -// the default values for that system will be used. -message Watchdogs { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v3.Watchdogs"; - - // Watchdog for the main thread. - Watchdog main_thread_watchdog = 1; - - // Watchdog for the worker threads. - Watchdog worker_watchdog = 2; -} - -// Envoy process watchdog configuration. When configured, this monitors for -// nonresponsive threads and kills the process after the configured thresholds. -// See the :ref:`watchdog documentation ` for more information. -// [#next-free-field: 8] -message Watchdog { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Watchdog"; - - message WatchdogAction { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v3.Watchdog.WatchdogAction"; - - // The events are fired in this order: KILL, MULTIKILL, MEGAMISS, MISS. - // Within an event type, actions execute in the order they are configured. - // For KILL/MULTIKILL there is a default PANIC that will run after the - // registered actions and kills the process if it wasn't already killed. - // It might be useful to specify several debug actions, and possibly an - // alternate FATAL action. - enum WatchdogEvent { - UNKNOWN = 0; - KILL = 1; - MULTIKILL = 2; - MEGAMISS = 3; - MISS = 4; - } - - // Extension specific configuration for the action. 
- core.v4alpha.TypedExtensionConfig config = 1; - - WatchdogEvent event = 2 [(validate.rules).enum = {defined_only: true}]; - } - - // Register actions that will fire on given WatchDog events. - // See *WatchDogAction* for priority of events. - repeated WatchdogAction actions = 7; - - // The duration after which Envoy counts a nonresponsive thread in the - // *watchdog_miss* statistic. If not specified the default is 200ms. - google.protobuf.Duration miss_timeout = 1; - - // The duration after which Envoy counts a nonresponsive thread in the - // *watchdog_mega_miss* statistic. If not specified the default is - // 1000ms. - google.protobuf.Duration megamiss_timeout = 2; - - // If a watched thread has been nonresponsive for this duration, assume a - // programming error and kill the entire Envoy process. Set to 0 to disable - // kill behavior. If not specified the default is 0 (disabled). - google.protobuf.Duration kill_timeout = 3; - - // Defines the maximum jitter used to adjust the *kill_timeout* if *kill_timeout* is - // enabled. Enabling this feature would help to reduce risk of synchronized - // watchdog kill events across proxies due to external triggers. Set to 0 to - // disable. If not specified the default is 0 (disabled). - google.protobuf.Duration max_kill_timeout_jitter = 6 [(validate.rules).duration = {gte {}}]; - - // If max(2, ceil(registered_threads * Fraction(*multikill_threshold*))) - // threads have been nonresponsive for at least this duration kill the entire - // Envoy process. Set to 0 to disable this behavior. If not specified the - // default is 0 (disabled). - google.protobuf.Duration multikill_timeout = 4; - - // Sets the threshold for *multikill_timeout* in terms of the percentage of - // nonresponsive threads required for the *multikill_timeout*. - // If not specified the default is 0. - type.v3.Percent multikill_threshold = 5; -} - -// Fatal actions to run while crashing. 
Actions can be safe (meaning they are -// async-signal safe) or unsafe. We run all safe actions before we run unsafe actions. -// If using an unsafe action that could get stuck or deadlock, it important to -// have an out of band system to terminate the process. -// -// The interface for the extension is ``Envoy::Server::Configuration::FatalAction``. -// *FatalAction* extensions live in the ``envoy.extensions.fatal_actions`` API -// namespace. -message FatalAction { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v3.FatalAction"; - - // Extension specific configuration for the action. It's expected to conform - // to the ``Envoy::Server::Configuration::FatalAction`` interface. - core.v4alpha.TypedExtensionConfig config = 1; -} - -// Runtime :ref:`configuration overview ` (deprecated). -message Runtime { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Runtime"; - - // The implementation assumes that the file system tree is accessed via a - // symbolic link. An atomic link swap is used when a new tree should be - // switched to. This parameter specifies the path to the symbolic link. Envoy - // will watch the location for changes and reload the file system tree when - // they happen. If this parameter is not set, there will be no disk based - // runtime. - string symlink_root = 1; - - // Specifies the subdirectory to load within the root directory. This is - // useful if multiple systems share the same delivery mechanism. Envoy - // configuration elements can be contained in a dedicated subdirectory. - string subdirectory = 2; - - // Specifies an optional subdirectory to load within the root directory. If - // specified and the directory exists, configuration values within this - // directory will override those found in the primary subdirectory. This is - // useful when Envoy is deployed across many different types of servers. 
- // Sometimes it is useful to have a per service cluster directory for runtime - // configuration. See below for exactly how the override directory is used. - string override_subdirectory = 3; - - // Static base runtime. This will be :ref:`overridden - // ` by other runtime layers, e.g. - // disk or admin. This follows the :ref:`runtime protobuf JSON representation - // encoding `. - google.protobuf.Struct base = 4; -} - -// [#next-free-field: 6] -message RuntimeLayer { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v3.RuntimeLayer"; - - // :ref:`Disk runtime ` layer. - message DiskLayer { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v3.RuntimeLayer.DiskLayer"; - - // The implementation assumes that the file system tree is accessed via a - // symbolic link. An atomic link swap is used when a new tree should be - // switched to. This parameter specifies the path to the symbolic link. - // Envoy will watch the location for changes and reload the file system tree - // when they happen. See documentation on runtime :ref:`atomicity - // ` for further details on how reloads are - // treated. - string symlink_root = 1; - - // Specifies the subdirectory to load within the root directory. This is - // useful if multiple systems share the same delivery mechanism. Envoy - // configuration elements can be contained in a dedicated subdirectory. - string subdirectory = 3; - - // :ref:`Append ` the - // service cluster to the path under symlink root. - bool append_service_cluster = 2; - } - - // :ref:`Admin console runtime ` layer. - message AdminLayer { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v3.RuntimeLayer.AdminLayer"; - } - - // :ref:`Runtime Discovery Service (RTDS) ` layer. 
- message RtdsLayer { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v3.RuntimeLayer.RtdsLayer"; - - // Resource to subscribe to at *rtds_config* for the RTDS layer. - string name = 1; - - // RTDS configuration source. - core.v4alpha.ConfigSource rtds_config = 2; - } - - // Descriptive name for the runtime layer. This is only used for the runtime - // :http:get:`/runtime` output. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - oneof layer_specifier { - option (validate.required) = true; - - // :ref:`Static runtime ` layer. - // This follows the :ref:`runtime protobuf JSON representation encoding - // `. Unlike static xDS resources, this static - // layer is overridable by later layers in the runtime virtual filesystem. - google.protobuf.Struct static_layer = 2; - - DiskLayer disk_layer = 3; - - AdminLayer admin_layer = 4; - - RtdsLayer rtds_layer = 5; - } -} - -// Runtime :ref:`configuration overview `. -message LayeredRuntime { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v3.LayeredRuntime"; - - // The :ref:`layers ` of the runtime. This is ordered - // such that later layers in the list overlay earlier entries. - repeated RuntimeLayer layers = 1; -} - -// Used to specify the header that needs to be registered as an inline header. -// -// If request or response contain multiple headers with the same name and the header -// name is registered as an inline header. Then multiple headers will be folded -// into one, and multiple header values will be concatenated by a suitable delimiter. -// The delimiter is generally a comma. -// -// For example, if 'foo' is registered as an inline header, and the headers contains -// the following two headers: -// -// .. code-block:: text -// -// foo: bar -// foo: eep -// -// Then they will eventually be folded into: -// -// .. 
code-block:: text -// -// foo: bar, eep -// -// Inline headers provide O(1) search performance, but each inline header imposes -// an additional memory overhead on all instances of the corresponding type of -// HeaderMap or TrailerMap. -message CustomInlineHeader { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v3.CustomInlineHeader"; - - enum InlineHeaderType { - REQUEST_HEADER = 0; - REQUEST_TRAILER = 1; - RESPONSE_HEADER = 2; - RESPONSE_TRAILER = 3; - } - - // The name of the header that is expected to be set as the inline header. - string inline_header_name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // The type of the header that is expected to be set as the inline header. - InlineHeaderType inline_header_type = 2 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto index 137300708e375..2e40700c3ace7 100644 --- a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto @@ -107,7 +107,7 @@ message Cluster { // this option or not. CLUSTER_PROVIDED = 6; - // [#not-implemented-hide:] Use the new :ref:`load_balancing_policy + // Use the new :ref:`load_balancing_policy // ` field to determine the LB policy. // [#next-major-version: In the v3 API, we should consider deprecating the lb_policy field // and instead using the new load_balancing_policy field as the one and only mechanism for @@ -721,8 +721,7 @@ message Cluster { // The :ref:`load balancer type ` to use // when picking a host in the cluster. - // [#comment:TODO: Remove enum constraint :ref:`LOAD_BALANCING_POLICY_CONFIG` when implemented.] 
- LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true not_in: 7}]; + LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true}]; // Setting this is required for specifying members of // :ref:`STATIC`, @@ -783,7 +782,7 @@ message Cluster { [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; // Additional options when handling HTTP1 requests. - // This has been deprecated in favor of http_protocol_options fields in the in the + // This has been deprecated in favor of http_protocol_options fields in the // :ref:`http_protocol_options ` message. // http_protocol_options can be set via the cluster's // :ref:`extension_protocol_options`. @@ -799,7 +798,7 @@ message Cluster { // supports prior knowledge for upstream connections. Even if TLS is used // with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2 // connections to happen over plain text. - // This has been deprecated in favor of http2_protocol_options fields in the in the + // This has been deprecated in favor of http2_protocol_options fields in the // :ref:`http_protocol_options ` // message. http2_protocol_options can be set via the cluster's // :ref:`extension_protocol_options`. @@ -1009,7 +1008,7 @@ message Cluster { // servers of this cluster. repeated Filter filters = 40; - // [#not-implemented-hide:] New mechanism for LB policy configuration. Used only if the + // New mechanism for LB policy configuration. Used only if the // :ref:`lb_policy` field has the value // :ref:`LOAD_BALANCING_POLICY_CONFIG`. LoadBalancingPolicy load_balancing_policy = 41; @@ -1090,7 +1089,7 @@ message Cluster { ]; } -// [#not-implemented-hide:] Extensible load balancing policy configuration. +// Extensible load balancing policy configuration. // // Every LB policy defined via this mechanism will be identified via a unique name using reverse // DNS notation. 
If the policy needs configuration parameters, it must define a message for its @@ -1116,10 +1115,11 @@ message LoadBalancingPolicy { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.LoadBalancingPolicy.Policy"; - // Required. The name of the LB policy. - string name = 1; + reserved 1, 3; - google.protobuf.Any typed_config = 3; + reserved "name", "typed_config"; + + core.v3.TypedExtensionConfig typed_extension_config = 4; google.protobuf.Struct hidden_envoy_deprecated_config = 2 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; diff --git a/generated_api_shadow/envoy/config/cluster/v4alpha/BUILD b/generated_api_shadow/envoy/config/cluster/v4alpha/BUILD deleted file mode 100644 index 2bac8db17256c..0000000000000 --- a/generated_api_shadow/envoy/config/cluster/v4alpha/BUILD +++ /dev/null @@ -1,17 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/cluster/v3:pkg", - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/endpoint/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@com_github_cncf_udpa//xds/core/v3:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/cluster/v4alpha/circuit_breaker.proto b/generated_api_shadow/envoy/config/cluster/v4alpha/circuit_breaker.proto deleted file mode 100644 index 36aebb8977800..0000000000000 --- a/generated_api_shadow/envoy/config/cluster/v4alpha/circuit_breaker.proto +++ /dev/null @@ -1,105 +0,0 @@ -syntax = "proto3"; - -package envoy.config.cluster.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - 
-option java_package = "io.envoyproxy.envoy.config.cluster.v4alpha"; -option java_outer_classname = "CircuitBreakerProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Circuit breakers] - -// :ref:`Circuit breaking` settings can be -// specified individually for each defined priority. -message CircuitBreakers { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.CircuitBreakers"; - - // A Thresholds defines CircuitBreaker settings for a - // :ref:`RoutingPriority`. - // [#next-free-field: 9] - message Thresholds { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.CircuitBreakers.Thresholds"; - - message RetryBudget { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.CircuitBreakers.Thresholds.RetryBudget"; - - // Specifies the limit on concurrent retries as a percentage of the sum of active requests and - // active pending requests. For example, if there are 100 active requests and the - // budget_percent is set to 25, there may be 25 active retries. - // - // This parameter is optional. Defaults to 20%. - type.v3.Percent budget_percent = 1; - - // Specifies the minimum retry concurrency allowed for the retry budget. The limit on the - // number of active retries may never go below this number. - // - // This parameter is optional. Defaults to 3. - google.protobuf.UInt32Value min_retry_concurrency = 2; - } - - // The :ref:`RoutingPriority` - // the specified CircuitBreaker settings apply to. - core.v4alpha.RoutingPriority priority = 1 [(validate.rules).enum = {defined_only: true}]; - - // The maximum number of connections that Envoy will make to the upstream - // cluster. If not specified, the default is 1024. 
- google.protobuf.UInt32Value max_connections = 2; - - // The maximum number of pending requests that Envoy will allow to the - // upstream cluster. If not specified, the default is 1024. - google.protobuf.UInt32Value max_pending_requests = 3; - - // The maximum number of parallel requests that Envoy will make to the - // upstream cluster. If not specified, the default is 1024. - google.protobuf.UInt32Value max_requests = 4; - - // The maximum number of parallel retries that Envoy will allow to the - // upstream cluster. If not specified, the default is 3. - google.protobuf.UInt32Value max_retries = 5; - - // Specifies a limit on concurrent retries in relation to the number of active requests. This - // parameter is optional. - // - // .. note:: - // - // If this field is set, the retry budget will override any configured retry circuit - // breaker. - RetryBudget retry_budget = 8; - - // If track_remaining is true, then stats will be published that expose - // the number of resources remaining until the circuit breakers open. If - // not specified, the default is false. - // - // .. note:: - // - // If a retry budget is used in lieu of the max_retries circuit breaker, - // the remaining retry resources remaining will not be tracked. - bool track_remaining = 6; - - // The maximum number of connection pools per cluster that Envoy will concurrently support at - // once. If not specified, the default is unlimited. Set this for clusters which create a - // large number of connection pools. See - // :ref:`Circuit Breaking ` for - // more details. - google.protobuf.UInt32Value max_connection_pools = 7; - } - - // If multiple :ref:`Thresholds` - // are defined with the same :ref:`RoutingPriority`, - // the first one in the list is used. If no Thresholds is defined for a given - // :ref:`RoutingPriority`, the default values - // are used. 
- repeated Thresholds thresholds = 1; -} diff --git a/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto b/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto deleted file mode 100644 index 3baa5c7ec0ac9..0000000000000 --- a/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto +++ /dev/null @@ -1,1161 +0,0 @@ -syntax = "proto3"; - -package envoy.config.cluster.v4alpha; - -import "envoy/config/cluster/v4alpha/circuit_breaker.proto"; -import "envoy/config/cluster/v4alpha/filter.proto"; -import "envoy/config/cluster/v4alpha/outlier_detection.proto"; -import "envoy/config/core/v4alpha/address.proto"; -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/config_source.proto"; -import "envoy/config/core/v4alpha/extension.proto"; -import "envoy/config/core/v4alpha/health_check.proto"; -import "envoy/config/core/v4alpha/protocol.proto"; -import "envoy/config/core/v4alpha/resolver.proto"; -import "envoy/config/endpoint/v3/endpoint.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "xds/core/v3/collection_entry.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/security.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.cluster.v4alpha"; -option java_outer_classname = "ClusterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Cluster configuration] - -// Cluster list collections. Entries are *Cluster* resources or references. 
-// [#not-implemented-hide:] -message ClusterCollection { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.ClusterCollection"; - - xds.core.v3.CollectionEntry entries = 1; -} - -// Configuration for a single upstream cluster. -// [#next-free-field: 56] -message Cluster { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.cluster.v3.Cluster"; - - // Refer to :ref:`service discovery type ` - // for an explanation on each type. - enum DiscoveryType { - // Refer to the :ref:`static discovery type` - // for an explanation. - STATIC = 0; - - // Refer to the :ref:`strict DNS discovery - // type` - // for an explanation. - STRICT_DNS = 1; - - // Refer to the :ref:`logical DNS discovery - // type` - // for an explanation. - LOGICAL_DNS = 2; - - // Refer to the :ref:`service discovery type` - // for an explanation. - EDS = 3; - - // Refer to the :ref:`original destination discovery - // type` - // for an explanation. - ORIGINAL_DST = 4; - } - - // Refer to :ref:`load balancer type ` architecture - // overview section for information on each type. - enum LbPolicy { - reserved 4; - - reserved "ORIGINAL_DST_LB"; - - // Refer to the :ref:`round robin load balancing - // policy` - // for an explanation. - ROUND_ROBIN = 0; - - // Refer to the :ref:`least request load balancing - // policy` - // for an explanation. - LEAST_REQUEST = 1; - - // Refer to the :ref:`ring hash load balancing - // policy` - // for an explanation. - RING_HASH = 2; - - // Refer to the :ref:`random load balancing - // policy` - // for an explanation. - RANDOM = 3; - - // Refer to the :ref:`Maglev load balancing policy` - // for an explanation. - MAGLEV = 5; - - // This load balancer type must be specified if the configured cluster provides a cluster - // specific load balancer. Consult the configured cluster's documentation for whether to set - // this option or not. 
- CLUSTER_PROVIDED = 6; - - // [#not-implemented-hide:] Use the new :ref:`load_balancing_policy - // ` field to determine the LB policy. - // [#next-major-version: In the v3 API, we should consider deprecating the lb_policy field - // and instead using the new load_balancing_policy field as the one and only mechanism for - // configuring this.] - LOAD_BALANCING_POLICY_CONFIG = 7; - } - - // When V4_ONLY is selected, the DNS resolver will only perform a lookup for - // addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will - // only perform a lookup for addresses in the IPv6 family. If AUTO is - // specified, the DNS resolver will first perform a lookup for addresses in - // the IPv6 family and fallback to a lookup for addresses in the IPv4 family. - // For cluster types other than - // :ref:`STRICT_DNS` and - // :ref:`LOGICAL_DNS`, - // this setting is - // ignored. - enum DnsLookupFamily { - AUTO = 0; - V4_ONLY = 1; - V6_ONLY = 2; - } - - enum ClusterProtocolSelection { - // Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2). - // If :ref:`http2_protocol_options ` are - // present, HTTP2 will be used, otherwise HTTP1.1 will be used. - USE_CONFIGURED_PROTOCOL = 0; - - // Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection. - USE_DOWNSTREAM_PROTOCOL = 1; - } - - // TransportSocketMatch specifies what transport socket config will be used - // when the match conditions are satisfied. - message TransportSocketMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.Cluster.TransportSocketMatch"; - - // The name of the match, used in stats generation. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Optional endpoint metadata match criteria. - // The connection to the endpoint with metadata matching what is set in this field - // will use the transport socket configuration specified here. 
- // The endpoint's metadata entry in *envoy.transport_socket_match* is used to match - // against the values specified in this field. - google.protobuf.Struct match = 2; - - // The configuration of the transport socket. - // [#extension-category: envoy.transport_sockets.upstream] - core.v4alpha.TransportSocket transport_socket = 3; - } - - // Extended cluster type. - message CustomClusterType { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.Cluster.CustomClusterType"; - - // The type of the cluster to instantiate. The name must match a supported cluster type. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Cluster specific configuration which depends on the cluster being instantiated. - // See the supported cluster for further documentation. - // [#extension-category: envoy.clusters] - google.protobuf.Any typed_config = 2; - } - - // Only valid when discovery type is EDS. - message EdsClusterConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.Cluster.EdsClusterConfig"; - - // Configuration for the source of EDS updates for this Cluster. - core.v4alpha.ConfigSource eds_config = 1; - - // Optional alternative to cluster name to present to EDS. This does not - // have the same restrictions as cluster name, i.e. it may be arbitrary - // length. This may be a xdstp:// URL. - string service_name = 2; - } - - // Optionally divide the endpoints in this cluster into subsets defined by - // endpoint metadata and selected by route and weighted cluster metadata. - // [#next-free-field: 8] - message LbSubsetConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.Cluster.LbSubsetConfig"; - - // If NO_FALLBACK is selected, a result - // equivalent to no healthy hosts is reported. If ANY_ENDPOINT is selected, - // any cluster endpoint may be returned (subject to policy, health checks, - // etc). 
If DEFAULT_SUBSET is selected, load balancing is performed over the - // endpoints matching the values from the default_subset field. - enum LbSubsetFallbackPolicy { - NO_FALLBACK = 0; - ANY_ENDPOINT = 1; - DEFAULT_SUBSET = 2; - } - - // Specifications for subsets. - message LbSubsetSelector { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector"; - - // Allows to override top level fallback policy per selector. - enum LbSubsetSelectorFallbackPolicy { - // If NOT_DEFINED top level config fallback policy is used instead. - NOT_DEFINED = 0; - - // If NO_FALLBACK is selected, a result equivalent to no healthy hosts is reported. - NO_FALLBACK = 1; - - // If ANY_ENDPOINT is selected, any cluster endpoint may be returned - // (subject to policy, health checks, etc). - ANY_ENDPOINT = 2; - - // If DEFAULT_SUBSET is selected, load balancing is performed over the - // endpoints matching the values from the default_subset field. - DEFAULT_SUBSET = 3; - - // If KEYS_SUBSET is selected, subset selector matching is performed again with metadata - // keys reduced to - // :ref:`fallback_keys_subset`. - // It allows for a fallback to a different, less specific selector if some of the keys of - // the selector are considered optional. - KEYS_SUBSET = 4; - } - - // List of keys to match with the weighted cluster metadata. - repeated string keys = 1; - - // Selects a mode of operation in which each subset has only one host. This mode uses the same rules for - // choosing a host, but updating hosts is faster, especially for large numbers of hosts. - // - // If a match is found to a host, that host will be used regardless of priority levels, unless the host is unhealthy. - // - // Currently, this mode is only supported if `subset_selectors` has only one entry, and `keys` contains - // only one entry. 
- // - // When this mode is enabled, configurations that contain more than one host with the same metadata value for the single key in `keys` - // will use only one of the hosts with the given key; no requests will be routed to the others. The cluster gauge - // :ref:`lb_subsets_single_host_per_subset_duplicate` indicates how many duplicates are - // present in the current configuration. - bool single_host_per_subset = 4; - - // The behavior used when no endpoint subset matches the selected route's - // metadata. - LbSubsetSelectorFallbackPolicy fallback_policy = 2 - [(validate.rules).enum = {defined_only: true}]; - - // Subset of - // :ref:`keys` used by - // :ref:`KEYS_SUBSET` - // fallback policy. - // It has to be a non empty list if KEYS_SUBSET fallback policy is selected. - // For any other fallback policy the parameter is not used and should not be set. - // Only values also present in - // :ref:`keys` are allowed, but - // `fallback_keys_subset` cannot be equal to `keys`. - repeated string fallback_keys_subset = 3; - } - - // The behavior used when no endpoint subset matches the selected route's - // metadata. The value defaults to - // :ref:`NO_FALLBACK`. - LbSubsetFallbackPolicy fallback_policy = 1 [(validate.rules).enum = {defined_only: true}]; - - // Specifies the default subset of endpoints used during fallback if - // fallback_policy is - // :ref:`DEFAULT_SUBSET`. - // Each field in default_subset is - // compared to the matching LbEndpoint.Metadata under the *envoy.lb* - // namespace. It is valid for no hosts to match, in which case the behavior - // is the same as a fallback_policy of - // :ref:`NO_FALLBACK`. - google.protobuf.Struct default_subset = 2; - - // For each entry, LbEndpoint.Metadata's - // *envoy.lb* namespace is traversed and a subset is created for each unique - // combination of key and value. For example: - // - // .. 
code-block:: json - // - // { "subset_selectors": [ - // { "keys": [ "version" ] }, - // { "keys": [ "stage", "hardware_type" ] } - // ]} - // - // A subset is matched when the metadata from the selected route and - // weighted cluster contains the same keys and values as the subset's - // metadata. The same host may appear in multiple subsets. - repeated LbSubsetSelector subset_selectors = 3; - - // If true, routing to subsets will take into account the localities and locality weights of the - // endpoints when making the routing decision. - // - // There are some potential pitfalls associated with enabling this feature, as the resulting - // traffic split after applying both a subset match and locality weights might be undesirable. - // - // Consider for example a situation in which you have 50/50 split across two localities X/Y - // which have 100 hosts each without subsetting. If the subset LB results in X having only 1 - // host selected but Y having 100, then a lot more load is being dumped on the single host in X - // than originally anticipated in the load balancing assignment delivered via EDS. - bool locality_weight_aware = 4; - - // When used with locality_weight_aware, scales the weight of each locality by the ratio - // of hosts in the subset vs hosts in the original subset. This aims to even out the load - // going to an individual locality if said locality is disproportionately affected by the - // subset predicate. - bool scale_locality_weight = 5; - - // If true, when a fallback policy is configured and its corresponding subset fails to find - // a host this will cause any host to be selected instead. - // - // This is useful when using the default subset as the fallback policy, given the default - // subset might become empty. With this option enabled, if that happens the LB will attempt - // to select a host from the entire cluster. 
- bool panic_mode_any = 6; - - // If true, metadata specified for a metadata key will be matched against the corresponding - // endpoint metadata if the endpoint metadata matches the value exactly OR it is a list value - // and any of the elements in the list matches the criteria. - bool list_as_any = 7; - } - - // Specific configuration for the LeastRequest load balancing policy. - message LeastRequestLbConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.Cluster.LeastRequestLbConfig"; - - // The number of random healthy hosts from which the host with the fewest active requests will - // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. - google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32 = {gte: 2}]; - - // The following formula is used to calculate the dynamic weights when hosts have different load - // balancing weights: - // - // `weight = load_balancing_weight / (active_requests + 1)^active_request_bias` - // - // The larger the active request bias is, the more aggressively active requests will lower the - // effective weight when all host weights are not equal. - // - // `active_request_bias` must be greater than or equal to 0.0. - // - // When `active_request_bias == 0.0` the Least Request Load Balancer doesn't consider the number - // of active requests at the time it picks a host and behaves like the Round Robin Load - // Balancer. - // - // When `active_request_bias > 0.0` the Least Request Load Balancer scales the load balancing - // weight by the number of active requests at the time it does a pick. - // - // The value is cached for performance reasons and refreshed whenever one of the Load Balancer's - // host sets changes, e.g., whenever there is a host membership update or a host load balancing - // weight change. - // - // .. note:: - // This setting only takes effect if all host weights are not equal. 
- core.v4alpha.RuntimeDouble active_request_bias = 2; - } - - // Specific configuration for the :ref:`RingHash` - // load balancing policy. - message RingHashLbConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.Cluster.RingHashLbConfig"; - - // The hash function used to hash hosts onto the ketama ring. - enum HashFunction { - // Use `xxHash `_, this is the default hash function. - XX_HASH = 0; - - // Use `MurmurHash2 `_, this is compatible with - // std:hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled - // on Linux and not macOS. - MURMUR_HASH_2 = 1; - } - - reserved 2; - - // Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each - // provided host) the better the request distribution will reflect the desired weights. Defaults - // to 1024 entries, and limited to 8M entries. See also - // :ref:`maximum_ring_size`. - google.protobuf.UInt64Value minimum_ring_size = 1 [(validate.rules).uint64 = {lte: 8388608}]; - - // The hash function used to hash hosts onto the ketama ring. The value defaults to - // :ref:`XX_HASH`. - HashFunction hash_function = 3 [(validate.rules).enum = {defined_only: true}]; - - // Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered - // to further constrain resource use. See also - // :ref:`minimum_ring_size`. - google.protobuf.UInt64Value maximum_ring_size = 4 [(validate.rules).uint64 = {lte: 8388608}]; - } - - // Specific configuration for the :ref:`Maglev` - // load balancing policy. - message MaglevLbConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.Cluster.MaglevLbConfig"; - - // The table size for Maglev hashing. The Maglev aims for ‘minimal disruption’ rather than an absolute guarantee. - // Minimal disruption means that when the set of upstreams changes, a connection will likely be sent to the same - // upstream as it was before. 
Increasing the table size reduces the amount of disruption. - // The table size must be prime number limited to 5000011. If it is not specified, the default is 65537. - google.protobuf.UInt64Value table_size = 1 [(validate.rules).uint64 = {lte: 5000011}]; - } - - // Specific configuration for the - // :ref:`Original Destination ` - // load balancing policy. - message OriginalDstLbConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.Cluster.OriginalDstLbConfig"; - - // When true, :ref:`x-envoy-original-dst-host - // ` can be used to override destination - // address. - // - // .. attention:: - // - // This header isn't sanitized by default, so enabling this feature allows HTTP clients to - // route traffic to arbitrary hosts and/or ports, which may have serious security - // consequences. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. - bool use_http_header = 1; - } - - // Common configuration for all load balancer implementations. - // [#next-free-field: 8] - message CommonLbConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.Cluster.CommonLbConfig"; - - // Configuration for :ref:`zone aware routing - // `. - message ZoneAwareLbConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig"; - - // Configures percentage of requests that will be considered for zone aware routing - // if zone aware routing is configured. If not specified, the default is 100%. - // * :ref:`runtime values `. - // * :ref:`Zone aware routing support `. - type.v3.Percent routing_enabled = 1; - - // Configures minimum upstream cluster size required for zone aware routing - // If upstream cluster size is less than specified, zone aware routing is not performed - // even if zone aware routing is configured. If not specified, the default is 6. - // * :ref:`runtime values `. 
- // * :ref:`Zone aware routing support `. - google.protobuf.UInt64Value min_cluster_size = 2; - - // If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic - // mode`. Instead, the cluster will fail all - // requests as if all hosts are unhealthy. This can help avoid potentially overwhelming a - // failing service. - bool fail_traffic_on_panic = 3; - } - - // Configuration for :ref:`locality weighted load balancing - // ` - message LocalityWeightedLbConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.Cluster.CommonLbConfig.LocalityWeightedLbConfig"; - } - - // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) - message ConsistentHashingLbConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.Cluster.CommonLbConfig.ConsistentHashingLbConfig"; - - // If set to `true`, the cluster will use hostname instead of the resolved - // address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address. - bool use_hostname_for_hashing = 1; - - // Configures percentage of average cluster load to bound per upstream host. For example, with a value of 150 - // no upstream host will get a load more than 1.5 times the average load of all the hosts in the cluster. - // If not specified, the load is not bounded for any upstream host. Typical value for this parameter is between 120 and 200. - // Minimum is 100. - // - // Applies to both Ring Hash and Maglev load balancers. - // - // This is implemented based on the method described in the paper https://arxiv.org/abs/1608.01350. For the specified - // `hash_balance_factor`, requests to any upstream host are capped at `hash_balance_factor/100` times the average number of requests - // across the cluster. 
When a request arrives for an upstream host that is currently serving at its max capacity, linear probing - // is used to identify an eligible host. Further, the linear probe is implemented using a random jump in hosts ring/table to identify - // the eligible host (this technique is as described in the paper https://arxiv.org/abs/1908.08762 - the random jump avoids the - // cascading overflow effect when choosing the next host in the ring/table). - // - // If weights are specified on the hosts, they are respected. - // - // This is an O(N) algorithm, unlike other load balancers. Using a lower `hash_balance_factor` results in more hosts - // being probed, so use a higher value if you require better performance. - google.protobuf.UInt32Value hash_balance_factor = 2 [(validate.rules).uint32 = {gte: 100}]; - } - - // Configures the :ref:`healthy panic threshold `. - // If not specified, the default is 50%. - // To disable panic mode, set to 0%. - // - // .. note:: - // The specified percent will be truncated to the nearest 1%. - type.v3.Percent healthy_panic_threshold = 1; - - oneof locality_config_specifier { - ZoneAwareLbConfig zone_aware_lb_config = 2; - - LocalityWeightedLbConfig locality_weighted_lb_config = 3; - } - - // If set, all health check/weight/metadata updates that happen within this duration will be - // merged and delivered in one shot when the duration expires. The start of the duration is when - // the first update happens. This is useful for big clusters, with potentially noisy deploys - // that might trigger excessive CPU usage due to a constant stream of healthcheck state changes - // or metadata updates. The first set of updates to be seen apply immediately (e.g.: a new - // cluster). Please always keep in mind that the use of sandbox technologies may change this - // behavior. - // - // If this is not set, we default to a merge window of 1000ms. To disable it, set the merge - // window to 0. 
- // - // Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is - // because merging those updates isn't currently safe. See - // https://github.com/envoyproxy/envoy/pull/3941. - google.protobuf.Duration update_merge_window = 4; - - // If set to true, Envoy will :ref:`exclude ` new hosts - // when computing load balancing weights until they have been health checked for the first time. - // This will have no effect unless active health checking is also configured. - bool ignore_new_hosts_until_first_hc = 5; - - // If set to `true`, the cluster manager will drain all existing - // connections to upstream hosts whenever hosts are added or removed from the cluster. - bool close_connections_on_host_set_change = 6; - - // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) - ConsistentHashingLbConfig consistent_hashing_lb_config = 7; - } - - message RefreshRate { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.Cluster.RefreshRate"; - - // Specifies the base interval between refreshes. This parameter is required and must be greater - // than zero and less than - // :ref:`max_interval `. - google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { - required: true - gt {nanos: 1000000} - }]; - - // Specifies the maximum interval between refreshes. This parameter is optional, but must be - // greater than or equal to the - // :ref:`base_interval ` if set. The default - // is 10 times the :ref:`base_interval `. - google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {nanos: 1000000}}]; - } - - message PreconnectPolicy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.Cluster.PreconnectPolicy"; - - // Indicates how many streams (rounded up) can be anticipated per-upstream for each - // incoming stream. This is useful for high-QPS or latency-sensitive services. 
Preconnecting - // will only be done if the upstream is healthy and the cluster has traffic. - // - // For example if this is 2, for an incoming HTTP/1.1 stream, 2 connections will be - // established, one for the new incoming stream, and one for a presumed follow-up stream. For - // HTTP/2, only one connection would be established by default as one connection can - // serve both the original and presumed follow-up stream. - // - // In steady state for non-multiplexed connections a value of 1.5 would mean if there were 100 - // active streams, there would be 100 connections in use, and 50 connections preconnected. - // This might be a useful value for something like short lived single-use connections, - // for example proxying HTTP/1.1 if keep-alive were false and each stream resulted in connection - // termination. It would likely be overkill for long lived connections, such as TCP proxying SMTP - // or regular HTTP/1.1 with keep-alive. For long lived traffic, a value of 1.05 would be more - // reasonable, where for every 100 connections, 5 preconnected connections would be in the queue - // in case of unexpected disconnects where the connection could not be reused. - // - // If this value is not set, or set explicitly to one, Envoy will fetch as many connections - // as needed to serve streams in flight. This means in steady state if a connection is torn down, - // a subsequent streams will pay an upstream-rtt latency penalty waiting for a new connection. - // - // This is limited somewhat arbitrarily to 3 because preconnecting too aggressively can - // harm latency more than the preconnecting helps. - google.protobuf.DoubleValue per_upstream_preconnect_ratio = 1 - [(validate.rules).double = {lte: 3.0 gte: 1.0}]; - - // Indicates how many many streams (rounded up) can be anticipated across a cluster for each - // stream, useful for low QPS services. 
This is currently supported for a subset of - // deterministic non-hash-based load-balancing algorithms (weighted round robin, random). - // Unlike *per_upstream_preconnect_ratio* this preconnects across the upstream instances in a - // cluster, doing best effort predictions of what upstream would be picked next and - // pre-establishing a connection. - // - // Preconnecting will be limited to one preconnect per configured upstream in the cluster and will - // only be done if there are healthy upstreams and the cluster has traffic. - // - // For example if preconnecting is set to 2 for a round robin HTTP/2 cluster, on the first - // incoming stream, 2 connections will be preconnected - one to the first upstream for this - // cluster, one to the second on the assumption there will be a follow-up stream. - // - // If this value is not set, or set explicitly to one, Envoy will fetch as many connections - // as needed to serve streams in flight, so during warm up and in steady state if a connection - // is closed (and per_upstream_preconnect_ratio is not set), there will be a latency hit for - // connection establishment. - // - // If both this and preconnect_ratio are set, Envoy will make sure both predicted needs are met, - // basically preconnecting max(predictive-preconnect, per-upstream-preconnect), for each - // upstream. - google.protobuf.DoubleValue predictive_preconnect_ratio = 2 - [(validate.rules).double = {lte: 3.0 gte: 1.0}]; - } - - reserved 12, 15, 7, 11, 35; - - reserved "hosts", "tls_context", "extension_protocol_options"; - - // Configuration to use different transport sockets for different endpoints. - // The entry of *envoy.transport_socket_match* in the - // :ref:`LbEndpoint.Metadata ` - // is used to match against the transport sockets as they appear in the list. The first - // :ref:`match ` is used. - // For example, with the following match - // - // .. 
code-block:: yaml - // - // transport_socket_matches: - // - name: "enableMTLS" - // match: - // acceptMTLS: true - // transport_socket: - // name: envoy.transport_sockets.tls - // config: { ... } # tls socket configuration - // - name: "defaultToPlaintext" - // match: {} - // transport_socket: - // name: envoy.transport_sockets.raw_buffer - // - // Connections to the endpoints whose metadata value under *envoy.transport_socket_match* - // having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration. - // - // If a :ref:`socket match ` with empty match - // criteria is provided, that always match any endpoint. For example, the "defaultToPlaintext" - // socket match in case above. - // - // If an endpoint metadata's value under *envoy.transport_socket_match* does not match any - // *TransportSocketMatch*, socket configuration fallbacks to use the *tls_context* or - // *transport_socket* specified in this cluster. - // - // This field allows gradual and flexible transport socket configuration changes. - // - // The metadata of endpoints in EDS can indicate transport socket capabilities. For example, - // an endpoint's metadata can have two key value pairs as "acceptMTLS": "true", - // "acceptPlaintext": "true". While some other endpoints, only accepting plaintext traffic - // has "acceptPlaintext": "true" metadata information. - // - // Then the xDS server can configure the CDS to a client, Envoy A, to send mutual TLS - // traffic for endpoints with "acceptMTLS": "true", by adding a corresponding - // *TransportSocketMatch* in this field. Other client Envoys receive CDS without - // *transport_socket_match* set, and still send plain text traffic to the same cluster. - // - // This field can be used to specify custom transport socket configurations for health - // checks by adding matching key/value pairs in a health check's - // :ref:`transport socket match criteria ` field. 
- // - // [#comment:TODO(incfly): add a detailed architecture doc on intended usage.] - repeated TransportSocketMatch transport_socket_matches = 43; - - // Supplies the name of the cluster which must be unique across all clusters. - // The cluster name is used when emitting - // :ref:`statistics ` if :ref:`alt_stat_name - // ` is not provided. - // Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // An optional alternative to the cluster name to be used for observability. This name is used - // emitting stats for the cluster and access logging the cluster name. This will appear as - // additional information in configuration dumps of a cluster's current status as - // :ref:`observability_name ` - // and as an additional tag "upstream_cluster.name" while tracing. Note: access logging using - // this field is presently enabled with runtime feature - // `envoy.reloadable_features.use_observable_cluster_name`. Any ``:`` in the name will be - // converted to ``_`` when emitting statistics. This should not be confused with :ref:`Router - // Filter Header `. - string observability_name = 28; - - oneof cluster_discovery_type { - // The :ref:`service discovery type ` - // to use for resolving the cluster. - DiscoveryType type = 2 [(validate.rules).enum = {defined_only: true}]; - - // The custom cluster type. - CustomClusterType cluster_type = 38; - } - - // Configuration to use for EDS updates for the Cluster. - EdsClusterConfig eds_cluster_config = 3; - - // The timeout for new network connections to hosts in the cluster. - // If not set, a default value of 5s will be used. - google.protobuf.Duration connect_timeout = 4 [(validate.rules).duration = {gt {}}]; - - // Soft limit on size of the cluster’s connections read and write buffers. If - // unspecified, an implementation defined default is applied (1MiB). 
- google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5 - [(udpa.annotations.security).configure_for_untrusted_upstream = true]; - - // The :ref:`load balancer type ` to use - // when picking a host in the cluster. - // [#comment:TODO: Remove enum constraint :ref:`LOAD_BALANCING_POLICY_CONFIG` when implemented.] - LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true not_in: 7}]; - - // Setting this is required for specifying members of - // :ref:`STATIC`, - // :ref:`STRICT_DNS` - // or :ref:`LOGICAL_DNS` clusters. - // This field supersedes the *hosts* field in the v2 API. - // - // .. attention:: - // - // Setting this allows non-EDS cluster types to contain embedded EDS equivalent - // :ref:`endpoint assignments`. - // - endpoint.v3.ClusterLoadAssignment load_assignment = 33; - - // Optional :ref:`active health checking ` - // configuration for the cluster. If no - // configuration is specified no health checking will be done and all cluster - // members will be considered healthy at all times. - repeated core.v4alpha.HealthCheck health_checks = 8; - - // Optional maximum requests for a single upstream connection. This parameter - // is respected by both the HTTP/1.1 and HTTP/2 connection pool - // implementations. If not specified, there is no limit. Setting this - // parameter to 1 will effectively disable keep alive. - // - // .. attention:: - // This field has been deprecated in favor of the :ref:`max_requests_per_connection ` field. - google.protobuf.UInt32Value hidden_envoy_deprecated_max_requests_per_connection = 9 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Optional :ref:`circuit breaking ` for the cluster. - CircuitBreakers circuit_breakers = 10; - - // HTTP protocol options that are applied only to upstream HTTP connections. - // These options apply to all HTTP versions. 
- // This has been deprecated in favor of - // :ref:`upstream_http_protocol_options ` - // in the :ref:`http_protocol_options ` message. - // upstream_http_protocol_options can be set via the cluster's - // :ref:`extension_protocol_options`. - // See :ref:`upstream_http_protocol_options - // ` - // for example usage. - core.v4alpha.UpstreamHttpProtocolOptions hidden_envoy_deprecated_upstream_http_protocol_options = - 46 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Additional options when handling HTTP requests upstream. These options will be applicable to - // both HTTP1 and HTTP2 requests. - // This has been deprecated in favor of - // :ref:`common_http_protocol_options ` - // in the :ref:`http_protocol_options ` message. - // common_http_protocol_options can be set via the cluster's - // :ref:`extension_protocol_options`. - // See :ref:`upstream_http_protocol_options - // ` - // for example usage. - core.v4alpha.HttpProtocolOptions hidden_envoy_deprecated_common_http_protocol_options = 29 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Additional options when handling HTTP1 requests. - // This has been deprecated in favor of http_protocol_options fields in the in the - // :ref:`http_protocol_options ` message. - // http_protocol_options can be set via the cluster's - // :ref:`extension_protocol_options`. - // See :ref:`upstream_http_protocol_options - // ` - // for example usage. - core.v4alpha.Http1ProtocolOptions hidden_envoy_deprecated_http_protocol_options = 13 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Even if default HTTP2 protocol options are desired, this field must be - // set so that Envoy will assume that the upstream supports HTTP/2 when - // making new HTTP connection pool connections. Currently, Envoy only - // supports prior knowledge for upstream connections. 
Even if TLS is used - // with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2 - // connections to happen over plain text. - // This has been deprecated in favor of http2_protocol_options fields in the in the - // :ref:`http_protocol_options ` - // message. http2_protocol_options can be set via the cluster's - // :ref:`extension_protocol_options`. - // See :ref:`upstream_http_protocol_options - // ` - // for example usage. - core.v4alpha.Http2ProtocolOptions hidden_envoy_deprecated_http2_protocol_options = 14 [ - deprecated = true, - (udpa.annotations.security).configure_for_untrusted_upstream = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0" - ]; - - // The extension_protocol_options field is used to provide extension-specific protocol options - // for upstream connections. The key should match the extension filter name, such as - // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on - // specific options. - // [#next-major-version: make this a list of typed extensions.] - map typed_extension_protocol_options = 36; - - // If the DNS refresh rate is specified and the cluster type is either - // :ref:`STRICT_DNS`, - // or :ref:`LOGICAL_DNS`, - // this value is used as the cluster’s DNS refresh - // rate. The value configured must be at least 1ms. If this setting is not specified, the - // value defaults to 5000ms. For cluster types other than - // :ref:`STRICT_DNS` - // and :ref:`LOGICAL_DNS` - // this setting is ignored. - google.protobuf.Duration dns_refresh_rate = 16 - [(validate.rules).duration = {gt {nanos: 1000000}}]; - - // If the DNS failure refresh rate is specified and the cluster type is either - // :ref:`STRICT_DNS`, - // or :ref:`LOGICAL_DNS`, - // this is used as the cluster’s DNS refresh rate when requests are failing. If this setting is - // not specified, the failure refresh rate defaults to the DNS refresh rate. 
For cluster types - // other than :ref:`STRICT_DNS` and - // :ref:`LOGICAL_DNS` this setting is - // ignored. - RefreshRate dns_failure_refresh_rate = 44; - - // Optional configuration for setting cluster's DNS refresh rate. If the value is set to true, - // cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS - // resolution. - bool respect_dns_ttl = 39; - - // The DNS IP address resolution policy. If this setting is not specified, the - // value defaults to - // :ref:`AUTO`. - DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum = {defined_only: true}]; - - // If DNS resolvers are specified and the cluster type is either - // :ref:`STRICT_DNS`, - // or :ref:`LOGICAL_DNS`, - // this value is used to specify the cluster’s dns resolvers. - // If this setting is not specified, the value defaults to the default - // resolver, which uses /etc/resolv.conf for configuration. For cluster types - // other than - // :ref:`STRICT_DNS` - // and :ref:`LOGICAL_DNS` - // this setting is ignored. - // Setting this value causes failure if the - // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during - // server startup. Apple's API only allows overriding DNS resolvers via system settings. - // This field is deprecated in favor of *dns_resolution_config* - // which aggregates all of the DNS resolver configuration in a single message. - repeated core.v4alpha.Address hidden_envoy_deprecated_dns_resolvers = 18 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Always use TCP queries instead of UDP queries for DNS lookups. - // Setting this value causes failure if the - // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during - // server startup. Apple' API only uses UDP for DNS resolution. - // This field is deprecated in favor of *dns_resolution_config* - // which aggregates all of the DNS resolver configuration in a single message. 
- bool hidden_envoy_deprecated_use_tcp_for_dns_lookups = 45 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // DNS resolution configuration which includes the underlying dns resolver addresses and options. - // *dns_resolution_config* will be deprecated once - // :ref:'typed_dns_resolver_config ' - // is fully supported. - core.v4alpha.DnsResolutionConfig dns_resolution_config = 53; - - // DNS resolver type configuration extension. This extension can be used to configure c-ares, apple, - // or any other DNS resolver types and the related parameters. - // For example, an object of :ref:`DnsResolutionConfig ` - // can be packed into this *typed_dns_resolver_config*. This configuration will replace the - // :ref:'dns_resolution_config ' - // configuration eventually. - // TODO(yanjunxiang): Investigate the deprecation plan for *dns_resolution_config*. - // During the transition period when both *dns_resolution_config* and *typed_dns_resolver_config* exists, - // this configuration is optional. - // When *typed_dns_resolver_config* is in place, Envoy will use it and ignore *dns_resolution_config*. - // When *typed_dns_resolver_config* is missing, the default behavior is in place. - // [#not-implemented-hide:] - core.v4alpha.TypedExtensionConfig typed_dns_resolver_config = 55; - - // Optional configuration for having cluster readiness block on warm-up. Currently, only applicable for - // :ref:`STRICT_DNS`, - // or :ref:`LOGICAL_DNS`. - // If true, cluster readiness blocks on warm-up. If false, the cluster will complete - // initialization whether or not warm-up has completed. Defaults to true. - google.protobuf.BoolValue wait_for_warm_on_init = 54; - - // If specified, outlier detection will be enabled for this upstream cluster. - // Each of the configuration values can be overridden via - // :ref:`runtime values `. 
- OutlierDetection outlier_detection = 19; - - // The interval for removing stale hosts from a cluster type - // :ref:`ORIGINAL_DST`. - // Hosts are considered stale if they have not been used - // as upstream destinations during this interval. New hosts are added - // to original destination clusters on demand as new connections are - // redirected to Envoy, causing the number of hosts in the cluster to - // grow over time. Hosts that are not stale (they are actively used as - // destinations) are kept in the cluster, which allows connections to - // them remain open, saving the latency that would otherwise be spent - // on opening new connections. If this setting is not specified, the - // value defaults to 5000ms. For cluster types other than - // :ref:`ORIGINAL_DST` - // this setting is ignored. - google.protobuf.Duration cleanup_interval = 20 [(validate.rules).duration = {gt {}}]; - - // Optional configuration used to bind newly established upstream connections. - // This overrides any bind_config specified in the bootstrap proto. - // If the address and port are empty, no bind will be performed. - core.v4alpha.BindConfig upstream_bind_config = 21; - - // Configuration for load balancing subsetting. - LbSubsetConfig lb_subset_config = 22; - - // Optional configuration for the load balancing algorithm selected by - // LbPolicy. Currently only - // :ref:`RING_HASH`, - // :ref:`MAGLEV` and - // :ref:`LEAST_REQUEST` - // has additional configuration options. - // Specifying ring_hash_lb_config or maglev_lb_config or least_request_lb_config without setting the corresponding - // LbPolicy will generate an error at runtime. - oneof lb_config { - // Optional configuration for the Ring Hash load balancing policy. - RingHashLbConfig ring_hash_lb_config = 23; - - // Optional configuration for the Maglev load balancing policy. - MaglevLbConfig maglev_lb_config = 52; - - // Optional configuration for the Original Destination load balancing policy. 
- OriginalDstLbConfig original_dst_lb_config = 34; - - // Optional configuration for the LeastRequest load balancing policy. - LeastRequestLbConfig least_request_lb_config = 37; - } - - // Common configuration for all load balancer implementations. - CommonLbConfig common_lb_config = 27; - - // Optional custom transport socket implementation to use for upstream connections. - // To setup TLS, set a transport socket with name `envoy.transport_sockets.tls` and - // :ref:`UpstreamTlsContexts ` in the `typed_config`. - // If no transport socket configuration is specified, new connections - // will be set up with plaintext. - core.v4alpha.TransportSocket transport_socket = 24; - - // The Metadata field can be used to provide additional information about the - // cluster. It can be used for stats, logging, and varying filter behavior. - // Fields should use reverse DNS notation to denote which entity within Envoy - // will need the information. For instance, if the metadata is intended for - // the Router filter, the filter name should be specified as *envoy.filters.http.router*. - core.v4alpha.Metadata metadata = 25; - - // Determines how Envoy selects the protocol used to speak to upstream hosts. - // This has been deprecated in favor of setting explicit protocol selection - // in the :ref:`http_protocol_options - // ` message. - // http_protocol_options can be set via the cluster's - // :ref:`extension_protocol_options`. - ClusterProtocolSelection hidden_envoy_deprecated_protocol_selection = 26 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Optional options for upstream connections. - UpstreamConnectionOptions upstream_connection_options = 30; - - // If an upstream host becomes unhealthy (as determined by the configured health checks - // or outlier detection), immediately close all connections to the failed host. - // - // .. note:: - // - // This is currently only supported for connections created by tcp_proxy. - // - // .. 
note:: - // - // The current implementation of this feature closes all connections immediately when - // the unhealthy status is detected. If there are a large number of connections open - // to an upstream host that becomes unhealthy, Envoy may spend a substantial amount of - // time exclusively closing these connections, and not processing any other traffic. - bool close_connections_on_host_health_failure = 31; - - // If set to true, Envoy will ignore the health value of a host when processing its removal - // from service discovery. This means that if active health checking is used, Envoy will *not* - // wait for the endpoint to go unhealthy before removing it. - bool ignore_health_on_host_removal = 32; - - // An (optional) network filter chain, listed in the order the filters should be applied. - // The chain will be applied to all outgoing connections that Envoy makes to the upstream - // servers of this cluster. - repeated Filter filters = 40; - - // [#not-implemented-hide:] New mechanism for LB policy configuration. Used only if the - // :ref:`lb_policy` field has the value - // :ref:`LOAD_BALANCING_POLICY_CONFIG`. - LoadBalancingPolicy load_balancing_policy = 41; - - // [#not-implemented-hide:] - // If present, tells the client where to send load reports via LRS. If not present, the - // client will fall back to a client-side default, which may be either (a) don't send any - // load reports or (b) send load reports for all clusters to a single default server - // (which may be configured in the bootstrap file). - // - // Note that if multiple clusters point to the same LRS server, the client may choose to - // create a separate stream for each cluster or it may choose to coalesce the data for - // multiple clusters onto a single stream. Either way, the client must make sure to send - // the data for any given cluster on no more than one stream. 
- // - // [#next-major-version: In the v3 API, we should consider restructuring this somehow, - // maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation - // from the LRS stream here.] - core.v4alpha.ConfigSource lrs_server = 42; - - // If track_timeout_budgets is true, the :ref:`timeout budget histograms - // ` will be published for each - // request. These show what percentage of a request's per try and global timeout was used. A value - // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value - // of 100 would indicate that the request took the entirety of the timeout given to it. - // - // .. attention:: - // - // This field has been deprecated in favor of `timeout_budgets`, part of - // :ref:`track_cluster_stats `. - bool hidden_envoy_deprecated_track_timeout_budgets = 47 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Optional customization and configuration of upstream connection pool, and upstream type. - // - // Currently this field only applies for HTTP traffic but is designed for eventual use for custom - // TCP upstreams. - // - // For HTTP traffic, Envoy will generally take downstream HTTP and send it upstream as upstream - // HTTP, using the http connection pool and the codec from `http2_protocol_options` - // - // For routes where CONNECT termination is configured, Envoy will take downstream CONNECT - // requests and forward the CONNECT payload upstream over raw TCP using the tcp connection pool. - // - // The default pool used is the generic connection pool which creates the HTTP upstream for most - // HTTP requests, and the TCP upstream if CONNECT termination is configured. - // - // If users desire custom connection pool or upstream behavior, for example terminating - // CONNECT only if a custom filter indicates it is appropriate, the custom factories - // can be registered and configured here. 
- // [#extension-category: envoy.upstreams] - core.v4alpha.TypedExtensionConfig upstream_config = 48; - - // Configuration to track optional cluster stats. - TrackClusterStats track_cluster_stats = 49; - - // Preconnect configuration for this cluster. - PreconnectPolicy preconnect_policy = 50; - - // If `connection_pool_per_downstream_connection` is true, the cluster will use a separate - // connection pool for every downstream connection - bool connection_pool_per_downstream_connection = 51; -} - -// [#not-implemented-hide:] Extensible load balancing policy configuration. -// -// Every LB policy defined via this mechanism will be identified via a unique name using reverse -// DNS notation. If the policy needs configuration parameters, it must define a message for its -// own configuration, which will be stored in the config field. The name of the policy will tell -// clients which type of message they should expect to see in the config field. -// -// Note that there are cases where it is useful to be able to independently select LB policies -// for choosing a locality and for choosing an endpoint within that locality. For example, a -// given deployment may always use the same policy to choose the locality, but for choosing the -// endpoint within the locality, some clusters may use weighted-round-robin, while others may -// use some sort of session-based balancing. -// -// This can be accomplished via hierarchical LB policies, where the parent LB policy creates a -// child LB policy for each locality. For each request, the parent chooses the locality and then -// delegates to the child policy for that locality to choose the endpoint within the locality. -// -// To facilitate this, the config message for the top-level LB policy may include a field of -// type LoadBalancingPolicy that specifies the child policy. 
-message LoadBalancingPolicy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.LoadBalancingPolicy"; - - message Policy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.LoadBalancingPolicy.Policy"; - - reserved 2; - - reserved "config"; - - // Required. The name of the LB policy. - string name = 1; - - google.protobuf.Any typed_config = 3; - } - - // Each client will iterate over the list in order and stop at the first policy that it - // supports. This provides a mechanism for starting to use new LB policies that are not yet - // supported by all clients. - repeated Policy policies = 1; -} - -// An extensible structure containing the address Envoy should bind to when -// establishing upstream connections. -message UpstreamBindConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.UpstreamBindConfig"; - - // The address Envoy should bind to when establishing upstream connections. - core.v4alpha.Address source_address = 1; -} - -message UpstreamConnectionOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.UpstreamConnectionOptions"; - - // If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives. - core.v4alpha.TcpKeepalive tcp_keepalive = 1; -} - -message TrackClusterStats { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.TrackClusterStats"; - - // If timeout_budgets is true, the :ref:`timeout budget histograms - // ` will be published for each - // request. These show what percentage of a request's per try and global timeout was used. A value - // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value - // of 100 would indicate that the request took the entirety of the timeout given to it. 
- bool timeout_budgets = 1; - - // If request_response_sizes is true, then the :ref:`histograms - // ` tracking header and body sizes - // of requests and responses will be published. - bool request_response_sizes = 2; -} diff --git a/generated_api_shadow/envoy/config/cluster/v4alpha/filter.proto b/generated_api_shadow/envoy/config/cluster/v4alpha/filter.proto deleted file mode 100644 index d478fd34f1c79..0000000000000 --- a/generated_api_shadow/envoy/config/cluster/v4alpha/filter.proto +++ /dev/null @@ -1,30 +0,0 @@ -syntax = "proto3"; - -package envoy.config.cluster.v4alpha; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.cluster.v4alpha"; -option java_outer_classname = "FilterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Upstream filters] -// Upstream filters apply to the connections to the upstream cluster hosts. - -message Filter { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.cluster.v3.Filter"; - - // The name of the filter to instantiate. The name must match a - // supported upstream filter. Note that Envoy's :ref:`downstream network - // filters ` are not valid upstream filters. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Filter specific configuration which depends on the filter being - // instantiated. See the supported filters for further documentation. 
- google.protobuf.Any typed_config = 2; -} diff --git a/generated_api_shadow/envoy/config/cluster/v4alpha/outlier_detection.proto b/generated_api_shadow/envoy/config/cluster/v4alpha/outlier_detection.proto deleted file mode 100644 index a64c4b42247fc..0000000000000 --- a/generated_api_shadow/envoy/config/cluster/v4alpha/outlier_detection.proto +++ /dev/null @@ -1,157 +0,0 @@ -syntax = "proto3"; - -package envoy.config.cluster.v4alpha; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.cluster.v4alpha"; -option java_outer_classname = "OutlierDetectionProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Outlier detection] - -// See the :ref:`architecture overview ` for -// more information on outlier detection. -// [#next-free-field: 22] -message OutlierDetection { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.OutlierDetection"; - - // The number of consecutive 5xx responses or local origin errors that are mapped - // to 5xx error codes before a consecutive 5xx ejection - // occurs. Defaults to 5. - google.protobuf.UInt32Value consecutive_5xx = 1; - - // The time interval between ejection analysis sweeps. This can result in - // both new ejections as well as hosts being returned to service. Defaults - // to 10000ms or 10s. - google.protobuf.Duration interval = 2 [(validate.rules).duration = {gt {}}]; - - // The base time that a host is ejected for. The real time is equal to the - // base time multiplied by the number of times the host has been ejected and is - // capped by :ref:`max_ejection_time`. - // Defaults to 30000ms or 30s. 
- google.protobuf.Duration base_ejection_time = 3 [(validate.rules).duration = {gt {}}]; - - // The maximum % of an upstream cluster that can be ejected due to outlier - // detection. Defaults to 10% but will eject at least one host regardless of the value. - google.protobuf.UInt32Value max_ejection_percent = 4 [(validate.rules).uint32 = {lte: 100}]; - - // The % chance that a host will be actually ejected when an outlier status - // is detected through consecutive 5xx. This setting can be used to disable - // ejection or to ramp it up slowly. Defaults to 100. - google.protobuf.UInt32Value enforcing_consecutive_5xx = 5 [(validate.rules).uint32 = {lte: 100}]; - - // The % chance that a host will be actually ejected when an outlier status - // is detected through success rate statistics. This setting can be used to - // disable ejection or to ramp it up slowly. Defaults to 100. - google.protobuf.UInt32Value enforcing_success_rate = 6 [(validate.rules).uint32 = {lte: 100}]; - - // The number of hosts in a cluster that must have enough request volume to - // detect success rate outliers. If the number of hosts is less than this - // setting, outlier detection via success rate statistics is not performed - // for any host in the cluster. Defaults to 5. - google.protobuf.UInt32Value success_rate_minimum_hosts = 7; - - // The minimum number of total requests that must be collected in one - // interval (as defined by the interval duration above) to include this host - // in success rate based outlier detection. If the volume is lower than this - // setting, outlier detection via success rate statistics is not performed - // for that host. Defaults to 100. - google.protobuf.UInt32Value success_rate_request_volume = 8; - - // This factor is used to determine the ejection threshold for success rate - // outlier ejection. 
The ejection threshold is the difference between the - // mean success rate, and the product of this factor and the standard - // deviation of the mean success rate: mean - (stdev * - // success_rate_stdev_factor). This factor is divided by a thousand to get a - // double. That is, if the desired factor is 1.9, the runtime value should - // be 1900. Defaults to 1900. - google.protobuf.UInt32Value success_rate_stdev_factor = 9; - - // The number of consecutive gateway failures (502, 503, 504 status codes) - // before a consecutive gateway failure ejection occurs. Defaults to 5. - google.protobuf.UInt32Value consecutive_gateway_failure = 10; - - // The % chance that a host will be actually ejected when an outlier status - // is detected through consecutive gateway failures. This setting can be - // used to disable ejection or to ramp it up slowly. Defaults to 0. - google.protobuf.UInt32Value enforcing_consecutive_gateway_failure = 11 - [(validate.rules).uint32 = {lte: 100}]; - - // Determines whether to distinguish local origin failures from external errors. If set to true - // the following configuration parameters are taken into account: - // :ref:`consecutive_local_origin_failure`, - // :ref:`enforcing_consecutive_local_origin_failure` - // and - // :ref:`enforcing_local_origin_success_rate`. - // Defaults to false. - bool split_external_local_origin_errors = 12; - - // The number of consecutive locally originated failures before ejection - // occurs. Defaults to 5. Parameter takes effect only when - // :ref:`split_external_local_origin_errors` - // is set to true. - google.protobuf.UInt32Value consecutive_local_origin_failure = 13; - - // The % chance that a host will be actually ejected when an outlier status - // is detected through consecutive locally originated failures. This setting can be - // used to disable ejection or to ramp it up slowly. Defaults to 100. 
- // Parameter takes effect only when - // :ref:`split_external_local_origin_errors` - // is set to true. - google.protobuf.UInt32Value enforcing_consecutive_local_origin_failure = 14 - [(validate.rules).uint32 = {lte: 100}]; - - // The % chance that a host will be actually ejected when an outlier status - // is detected through success rate statistics for locally originated errors. - // This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100. - // Parameter takes effect only when - // :ref:`split_external_local_origin_errors` - // is set to true. - google.protobuf.UInt32Value enforcing_local_origin_success_rate = 15 - [(validate.rules).uint32 = {lte: 100}]; - - // The failure percentage to use when determining failure percentage-based outlier detection. If - // the failure percentage of a given host is greater than or equal to this value, it will be - // ejected. Defaults to 85. - google.protobuf.UInt32Value failure_percentage_threshold = 16 - [(validate.rules).uint32 = {lte: 100}]; - - // The % chance that a host will be actually ejected when an outlier status is detected through - // failure percentage statistics. This setting can be used to disable ejection or to ramp it up - // slowly. Defaults to 0. - // - // [#next-major-version: setting this without setting failure_percentage_threshold should be - // invalid in v4.] - google.protobuf.UInt32Value enforcing_failure_percentage = 17 - [(validate.rules).uint32 = {lte: 100}]; - - // The % chance that a host will be actually ejected when an outlier status is detected through - // local-origin failure percentage statistics. This setting can be used to disable ejection or to - // ramp it up slowly. Defaults to 0. - google.protobuf.UInt32Value enforcing_failure_percentage_local_origin = 18 - [(validate.rules).uint32 = {lte: 100}]; - - // The minimum number of hosts in a cluster in order to perform failure percentage-based ejection. 
- // If the total number of hosts in the cluster is less than this value, failure percentage-based - // ejection will not be performed. Defaults to 5. - google.protobuf.UInt32Value failure_percentage_minimum_hosts = 19; - - // The minimum number of total requests that must be collected in one interval (as defined by the - // interval duration above) to perform failure percentage-based ejection for this host. If the - // volume is lower than this setting, failure percentage-based ejection will not be performed for - // this host. Defaults to 50. - google.protobuf.UInt32Value failure_percentage_request_volume = 20; - - // The maximum time that a host is ejected for. See :ref:`base_ejection_time` - // for more information. If not specified, the default value (300000ms or 300s) or - // :ref:`base_ejection_time` value is applied, whatever is larger. - google.protobuf.Duration max_ejection_time = 21 [(validate.rules).duration = {gt {}}]; -} diff --git a/generated_api_shadow/envoy/config/common/matcher/v4alpha/BUILD b/generated_api_shadow/envoy/config/common/matcher/v4alpha/BUILD deleted file mode 100644 index 8c0f8a2e08d8b..0000000000000 --- a/generated_api_shadow/envoy/config/common/matcher/v4alpha/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/common/matcher/v3:pkg", - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/route/v4alpha:pkg", - "//envoy/type/matcher/v4alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/common/matcher/v4alpha/matcher.proto b/generated_api_shadow/envoy/config/common/matcher/v4alpha/matcher.proto deleted file mode 100644 index 2027331b31da3..0000000000000 --- a/generated_api_shadow/envoy/config/common/matcher/v4alpha/matcher.proto +++ /dev/null @@ -1,269 +0,0 @@ -syntax = "proto3"; - -package envoy.config.common.matcher.v4alpha; - -import "envoy/config/core/v4alpha/extension.proto"; -import "envoy/config/route/v4alpha/route_components.proto"; -import "envoy/type/matcher/v4alpha/string.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.common.matcher.v4alpha"; -option java_outer_classname = "MatcherProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Unified Matcher API] - -// A matcher, which may traverse a matching tree in order to result in a match action. -// During matching, the tree will be traversed until a match is found, or if no match -// is found the action specified by the most specific on_no_match will be evaluated. -// As an on_no_match might result in another matching tree being evaluated, this process -// might repeat several times until the final OnMatch (or no match) is decided. -// -// [#alpha:] -message Matcher { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.matcher.v3.Matcher"; - - // What to do if a match is successful. 
- message OnMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.matcher.v3.Matcher.OnMatch"; - - oneof on_match { - option (validate.required) = true; - - // Nested matcher to evaluate. - // If the nested matcher does not match and does not specify - // on_no_match, then this matcher is considered not to have - // matched, even if a predicate at this level or above returned - // true. - Matcher matcher = 1; - - // Protocol-specific action to take. - core.v4alpha.TypedExtensionConfig action = 2; - } - } - - // A linear list of field matchers. - // The field matchers are evaluated in order, and the first match - // wins. - message MatcherList { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.matcher.v3.Matcher.MatcherList"; - - // Predicate to determine if a match is successful. - message Predicate { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate"; - - // Predicate for a single input field. - message SinglePredicate { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate.SinglePredicate"; - - // Protocol-specific specification of input field to match on. - // [#extension-category: envoy.matching.common_inputs] - core.v4alpha.TypedExtensionConfig input = 1 [(validate.rules).message = {required: true}]; - - oneof matcher { - option (validate.required) = true; - - // Built-in string matcher. - type.matcher.v4alpha.StringMatcher value_match = 2; - - // Extension for custom matching logic. - // [#extension-category: envoy.matching.input_matchers] - core.v4alpha.TypedExtensionConfig custom_match = 3; - } - } - - // A list of two or more matchers. Used to allow using a list within a oneof. 
- message PredicateList { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate.PredicateList"; - - repeated Predicate predicate = 1 [(validate.rules).repeated = {min_items: 2}]; - } - - oneof match_type { - option (validate.required) = true; - - // A single predicate to evaluate. - SinglePredicate single_predicate = 1; - - // A list of predicates to be OR-ed together. - PredicateList or_matcher = 2; - - // A list of predicates to be AND-ed together. - PredicateList and_matcher = 3; - - // The invert of a predicate - Predicate not_matcher = 4; - } - } - - // An individual matcher. - message FieldMatcher { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.matcher.v3.Matcher.MatcherList.FieldMatcher"; - - // Determines if the match succeeds. - Predicate predicate = 1 [(validate.rules).message = {required: true}]; - - // What to do if the match succeeds. - OnMatch on_match = 2 [(validate.rules).message = {required: true}]; - } - - // A list of matchers. First match wins. - repeated FieldMatcher matchers = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - message MatcherTree { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.matcher.v3.Matcher.MatcherTree"; - - // A map of configured matchers. Used to allow using a map within a oneof. - message MatchMap { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.matcher.v3.Matcher.MatcherTree.MatchMap"; - - map map = 1 [(validate.rules).map = {min_pairs: 1}]; - } - - // Protocol-specific specification of input field to match on. - core.v4alpha.TypedExtensionConfig input = 1 [(validate.rules).message = {required: true}]; - - // Exact or prefix match maps in which to look up the input value. - // If the lookup succeeds, the match is considered successful, and - // the corresponding OnMatch is used. 
- oneof tree_type { - option (validate.required) = true; - - MatchMap exact_match_map = 2; - - // Longest matching prefix wins. - MatchMap prefix_match_map = 3; - - // Extension for custom matching logic. - core.v4alpha.TypedExtensionConfig custom_match = 4; - } - } - - oneof matcher_type { - option (validate.required) = true; - - // A linear list of matchers to evaluate. - MatcherList matcher_list = 1; - - // A match tree to evaluate. - MatcherTree matcher_tree = 2; - } - - // Optional OnMatch to use if the matcher failed. - // If specified, the OnMatch is used, and the matcher is considered - // to have matched. - // If not specified, the matcher is considered not to have matched. - OnMatch on_no_match = 3; -} - -// Match configuration. This is a recursive structure which allows complex nested match -// configurations to be built using various logical operators. -// [#next-free-field: 11] -message MatchPredicate { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.matcher.v3.MatchPredicate"; - - // A set of match configurations used for logical operations. - message MatchSet { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.matcher.v3.MatchPredicate.MatchSet"; - - // The list of rules that make up the set. - repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}]; - } - - oneof rule { - option (validate.required) = true; - - // A set that describes a logical OR. If any member of the set matches, the match configuration - // matches. - MatchSet or_match = 1; - - // A set that describes a logical AND. If all members of the set match, the match configuration - // matches. - MatchSet and_match = 2; - - // A negation match. The match configuration will match if the negated match condition matches. - MatchPredicate not_match = 3; - - // The match configuration will always match. 
- bool any_match = 4 [(validate.rules).bool = {const: true}]; - - // HTTP request headers match configuration. - HttpHeadersMatch http_request_headers_match = 5; - - // HTTP request trailers match configuration. - HttpHeadersMatch http_request_trailers_match = 6; - - // HTTP response headers match configuration. - HttpHeadersMatch http_response_headers_match = 7; - - // HTTP response trailers match configuration. - HttpHeadersMatch http_response_trailers_match = 8; - - // HTTP request generic body match configuration. - HttpGenericBodyMatch http_request_generic_body_match = 9; - - // HTTP response generic body match configuration. - HttpGenericBodyMatch http_response_generic_body_match = 10; - } -} - -// HTTP headers match configuration. -message HttpHeadersMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.matcher.v3.HttpHeadersMatch"; - - // HTTP headers to match. - repeated route.v4alpha.HeaderMatcher headers = 1; -} - -// HTTP generic body match configuration. -// List of text strings and hex strings to be located in HTTP body. -// All specified strings must be found in the HTTP body for positive match. -// The search may be limited to specified number of bytes from the body start. -// -// .. attention:: -// -// Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match. -// If multiple patterns are specified, the process is repeated for each pattern. If location of a pattern is known, ``bytes_limit`` should be specified -// to scan only part of the http body. 
-message HttpGenericBodyMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.matcher.v3.HttpGenericBodyMatch"; - - message GenericTextMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.matcher.v3.HttpGenericBodyMatch.GenericTextMatch"; - - oneof rule { - option (validate.required) = true; - - // Text string to be located in HTTP body. - string string_match = 1 [(validate.rules).string = {min_len: 1}]; - - // Sequence of bytes to be located in HTTP body. - bytes binary_match = 2 [(validate.rules).bytes = {min_len: 1}]; - } - } - - // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer). - uint32 bytes_limit = 1; - - // List of patterns to match. - repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}]; -} diff --git a/generated_api_shadow/envoy/config/core/v3/protocol.proto b/generated_api_shadow/envoy/config/core/v3/protocol.proto index 6589a3ed3a1a4..8f2347eb55179 100644 --- a/generated_api_shadow/envoy/config/core/v3/protocol.proto +++ b/generated_api_shadow/envoy/config/core/v3/protocol.proto @@ -73,7 +73,7 @@ message UpstreamHttpProtocolOptions { // Configures the alternate protocols cache which tracks alternate protocols that can be used to // make an HTTP connection to an origin server. See https://tools.ietf.org/html/rfc7838 for -// HTTP Alternate Services and https://datatracker.ietf.org/doc/html/draft-ietf-dnsop-svcb-https-04 +// HTTP Alternative Services and https://datatracker.ietf.org/doc/html/draft-ietf-dnsop-svcb-https-04 // for the "HTTPS" DNS resource record. message AlternateProtocolsCacheOptions { // The name of the cache. 
Multiple named caches allow independent alternate protocols cache diff --git a/generated_api_shadow/envoy/config/core/v4alpha/BUILD b/generated_api_shadow/envoy/config/core/v4alpha/BUILD deleted file mode 100644 index c9e435fda9a99..0000000000000 --- a/generated_api_shadow/envoy/config/core/v4alpha/BUILD +++ /dev/null @@ -1,16 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/type/matcher/v4alpha:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@com_github_cncf_udpa//xds/core/v3:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/core/v4alpha/address.proto b/generated_api_shadow/envoy/config/core/v4alpha/address.proto deleted file mode 100644 index 63d4d4a145075..0000000000000 --- a/generated_api_shadow/envoy/config/core/v4alpha/address.proto +++ /dev/null @@ -1,163 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v4alpha; - -import "envoy/config/core/v4alpha/socket_option.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; -option java_outer_classname = "AddressProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Network addresses] - -message Pipe { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Pipe"; - - // Unix Domain Socket path. On Linux, paths starting with '@' will use the - // abstract namespace. The starting '@' is replaced by a null byte by Envoy. 
- // Paths starting with '@' will result in an error in environments other than - // Linux. - string path = 1 [(validate.rules).string = {min_len: 1}]; - - // The mode for the Pipe. Not applicable for abstract sockets. - uint32 mode = 2 [(validate.rules).uint32 = {lte: 511}]; -} - -// [#not-implemented-hide:] The address represents an envoy internal listener. -// TODO(lambdai): Make this address available for listener and endpoint. -// TODO(asraa): When address available, remove workaround from test/server/server_fuzz_test.cc:30. -message EnvoyInternalAddress { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.EnvoyInternalAddress"; - - oneof address_name_specifier { - option (validate.required) = true; - - // [#not-implemented-hide:] The :ref:`listener name ` of the destination internal listener. - string server_listener_name = 1; - } -} - -// [#next-free-field: 7] -message SocketAddress { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.SocketAddress"; - - enum Protocol { - TCP = 0; - UDP = 1; - } - - Protocol protocol = 1 [(validate.rules).enum = {defined_only: true}]; - - // The address for this socket. :ref:`Listeners ` will bind - // to the address. An empty address is not allowed. Specify ``0.0.0.0`` or ``::`` - // to bind to any address. [#comment:TODO(zuercher) reinstate when implemented: - // It is possible to distinguish a Listener address via the prefix/suffix matching - // in :ref:`FilterChainMatch `.] When used - // within an upstream :ref:`BindConfig `, the address - // controls the source address of outbound connections. For :ref:`clusters - // `, the cluster type determines whether the - // address must be an IP (*STATIC* or *EDS* clusters) or a hostname resolved by DNS - // (*STRICT_DNS* or *LOGICAL_DNS* clusters). Address resolution can be customized - // via :ref:`resolver_name `. 
- string address = 2 [(validate.rules).string = {min_len: 1}]; - - oneof port_specifier { - option (validate.required) = true; - - uint32 port_value = 3 [(validate.rules).uint32 = {lte: 65535}]; - - // This is only valid if :ref:`resolver_name - // ` is specified below and the - // named resolver is capable of named port resolution. - string named_port = 4; - } - - // The name of the custom resolver. This must have been registered with Envoy. If - // this is empty, a context dependent default applies. If the address is a concrete - // IP address, no resolution will occur. If address is a hostname this - // should be set for resolution other than DNS. Specifying a custom resolver with - // *STRICT_DNS* or *LOGICAL_DNS* will generate an error at runtime. - string resolver_name = 5; - - // When binding to an IPv6 address above, this enables `IPv4 compatibility - // `_. Binding to ``::`` will - // allow both IPv4 and IPv6 connections, with peer IPv4 addresses mapped into - // IPv6 space as ``::FFFF:``. - bool ipv4_compat = 6; -} - -message TcpKeepalive { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.TcpKeepalive"; - - // Maximum number of keepalive probes to send without response before deciding - // the connection is dead. Default is to use the OS level configuration (unless - // overridden, Linux defaults to 9.) - google.protobuf.UInt32Value keepalive_probes = 1; - - // The number of seconds a connection needs to be idle before keep-alive probes - // start being sent. Default is to use the OS level configuration (unless - // overridden, Linux defaults to 7200s (i.e., 2 hours.) - google.protobuf.UInt32Value keepalive_time = 2; - - // The number of seconds between keep-alive probes. Default is to use the OS - // level configuration (unless overridden, Linux defaults to 75s.) 
- google.protobuf.UInt32Value keepalive_interval = 3; -} - -message BindConfig { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.BindConfig"; - - // The address to bind to when creating a socket. - SocketAddress source_address = 1 [(validate.rules).message = {required: true}]; - - // Whether to set the *IP_FREEBIND* option when creating the socket. When this - // flag is set to true, allows the :ref:`source_address - // ` to be an IP address - // that is not configured on the system running Envoy. When this flag is set - // to false, the option *IP_FREEBIND* is disabled on the socket. When this - // flag is not set (default), the socket is not modified, i.e. the option is - // neither enabled nor disabled. - google.protobuf.BoolValue freebind = 2; - - // Additional socket options that may not be present in Envoy source code or - // precompiled binaries. - repeated SocketOption socket_options = 3; -} - -// Addresses specify either a logical or physical address and port, which are -// used to tell Envoy where to bind/listen, connect to upstream and find -// management servers. -message Address { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Address"; - - oneof address { - option (validate.required) = true; - - SocketAddress socket_address = 1; - - Pipe pipe = 2; - - // [#not-implemented-hide:] - EnvoyInternalAddress envoy_internal_address = 3; - } -} - -// CidrRange specifies an IP Address and a prefix length to construct -// the subnet mask for a `CIDR `_ range. -message CidrRange { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.CidrRange"; - - // IPv4 or IPv6 address, e.g. ``192.0.0.0`` or ``2001:db8::``. - string address_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // Length of prefix, e.g. 0, 32. Defaults to 0 when unset. 
- google.protobuf.UInt32Value prefix_len = 2 [(validate.rules).uint32 = {lte: 128}]; -} diff --git a/generated_api_shadow/envoy/config/core/v4alpha/backoff.proto b/generated_api_shadow/envoy/config/core/v4alpha/backoff.proto deleted file mode 100644 index 266d57f84e74a..0000000000000 --- a/generated_api_shadow/envoy/config/core/v4alpha/backoff.proto +++ /dev/null @@ -1,37 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v4alpha; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; -option java_outer_classname = "BackoffProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Backoff Strategy] - -// Configuration defining a jittered exponential back off strategy. -message BackoffStrategy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.BackoffStrategy"; - - // The base interval to be used for the next back off computation. It should - // be greater than zero and less than or equal to :ref:`max_interval - // `. - google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { - required: true - gte {nanos: 1000000} - }]; - - // Specifies the maximum interval between retries. This parameter is optional, - // but must be greater than or equal to the :ref:`base_interval - // ` if set. The default - // is 10 times the :ref:`base_interval - // `. 
- google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}]; -} diff --git a/generated_api_shadow/envoy/config/core/v4alpha/base.proto b/generated_api_shadow/envoy/config/core/v4alpha/base.proto deleted file mode 100644 index 99ce121ddf63f..0000000000000 --- a/generated_api_shadow/envoy/config/core/v4alpha/base.proto +++ /dev/null @@ -1,465 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v4alpha; - -import "envoy/config/core/v4alpha/address.proto"; -import "envoy/config/core/v4alpha/backoff.proto"; -import "envoy/config/core/v4alpha/http_uri.proto"; -import "envoy/type/v3/percent.proto"; -import "envoy/type/v3/semantic_version.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "xds/core/v3/context_params.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; -option java_outer_classname = "BaseProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Common types] - -// Envoy supports :ref:`upstream priority routing -// ` both at the route and the virtual -// cluster level. The current priority implementation uses different connection -// pool and circuit breaking settings for each priority level. This means that -// even for HTTP/2 requests, two physical connections will be used to an -// upstream host. In the future Envoy will likely support true HTTP/2 priority -// over a single upstream connection. -enum RoutingPriority { - DEFAULT = 0; - HIGH = 1; -} - -// HTTP request method. 
-enum RequestMethod { - METHOD_UNSPECIFIED = 0; - GET = 1; - HEAD = 2; - POST = 3; - PUT = 4; - DELETE = 5; - CONNECT = 6; - OPTIONS = 7; - TRACE = 8; - PATCH = 9; -} - -// Identifies the direction of the traffic relative to the local Envoy. -enum TrafficDirection { - // Default option is unspecified. - UNSPECIFIED = 0; - - // The transport is used for incoming traffic. - INBOUND = 1; - - // The transport is used for outgoing traffic. - OUTBOUND = 2; -} - -// Identifies location of where either Envoy runs or where upstream hosts run. -message Locality { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Locality"; - - // Region this :ref:`zone ` belongs to. - string region = 1; - - // Defines the local service zone where Envoy is running. Though optional, it - // should be set if discovery service routing is used and the discovery - // service exposes :ref:`zone data `, - // either in this message or via :option:`--service-zone`. The meaning of zone - // is context dependent, e.g. `Availability Zone (AZ) - // `_ - // on AWS, `Zone `_ on - // GCP, etc. - string zone = 2; - - // When used for locality of upstream hosts, this field further splits zone - // into smaller chunks of sub-zones so they can be load balanced - // independently. - string sub_zone = 3; -} - -// BuildVersion combines SemVer version of extension with free-form build information -// (i.e. 'alpha', 'private-build') as a set of strings. -message BuildVersion { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.BuildVersion"; - - // SemVer version of extension. - type.v3.SemanticVersion version = 1; - - // Free-form build information. - // Envoy defines several well known keys in the source/common/version/version.h file - google.protobuf.Struct metadata = 2; -} - -// Version and identification for an Envoy extension. 
-// [#next-free-field: 6] -message Extension { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Extension"; - - // This is the name of the Envoy filter as specified in the Envoy - // configuration, e.g. envoy.filters.http.router, com.acme.widget. - string name = 1; - - // Category of the extension. - // Extension category names use reverse DNS notation. For instance "envoy.filters.listener" - // for Envoy's built-in listener filters or "com.acme.filters.http" for HTTP filters from - // acme.com vendor. - // [#comment:TODO(yanavlasov): Link to the doc with existing envoy category names.] - string category = 2; - - // [#not-implemented-hide:] Type descriptor of extension configuration proto. - // [#comment:TODO(yanavlasov): Link to the doc with existing configuration protos.] - // [#comment:TODO(yanavlasov): Add tests when PR #9391 lands.] - string type_descriptor = 3; - - // The version is a property of the extension and maintained independently - // of other extensions and the Envoy API. - // This field is not set when extension did not provide version information. - BuildVersion version = 4; - - // Indicates that the extension is present but was disabled via dynamic configuration. - bool disabled = 5; -} - -// Identifies a specific Envoy instance. The node identifier is presented to the -// management server, which may use this identifier to distinguish per Envoy -// configuration for serving. -// [#next-free-field: 13] -message Node { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Node"; - - reserved 5; - - reserved "build_version"; - - // An opaque node identifier for the Envoy node. This also provides the local - // service node name. It should be set if any of the following features are - // used: :ref:`statsd `, :ref:`CDS - // `, and :ref:`HTTP tracing - // `, either in this message or via - // :option:`--service-node`. 
- string id = 1; - - // Defines the local service cluster name where Envoy is running. Though - // optional, it should be set if any of the following features are used: - // :ref:`statsd `, :ref:`health check cluster - // verification - // `, - // :ref:`runtime override directory `, - // :ref:`user agent addition - // `, - // :ref:`HTTP global rate limiting `, - // :ref:`CDS `, and :ref:`HTTP tracing - // `, either in this message or via - // :option:`--service-cluster`. - string cluster = 2; - - // Opaque metadata extending the node identifier. Envoy will pass this - // directly to the management server. - google.protobuf.Struct metadata = 3; - - // Map from xDS resource type URL to dynamic context parameters. These may vary at runtime (unlike - // other fields in this message). For example, the xDS client may have a shard identifier that - // changes during the lifetime of the xDS client. In Envoy, this would be achieved by updating the - // dynamic context on the Server::Instance's LocalInfo context provider. The shard ID dynamic - // parameter then appears in this field during future discovery requests. - map dynamic_parameters = 12; - - // Locality specifying where the Envoy instance is running. - Locality locality = 4; - - // Free-form string that identifies the entity requesting config. - // E.g. "envoy" or "grpc" - string user_agent_name = 6; - - oneof user_agent_version_type { - // Free-form string that identifies the version of the entity requesting config. - // E.g. "1.12.2" or "abcd1234", or "SpecialEnvoyBuild" - string user_agent_version = 7; - - // Structured version of the entity requesting config. - BuildVersion user_agent_build_version = 8; - } - - // List of extensions and their versions supported by the node. - repeated Extension extensions = 9; - - // Client feature support list. These are well known features described - // in the Envoy API repository for a given major version of an API. 
Client features - // use reverse DNS naming scheme, for example `com.acme.feature`. - // See :ref:`the list of features ` that xDS client may - // support. - repeated string client_features = 10; - - // Known listening ports on the node as a generic hint to the management server - // for filtering :ref:`listeners ` to be returned. For example, - // if there is a listener bound to port 80, the list can optionally contain the - // SocketAddress `(0.0.0.0,80)`. The field is optional and just a hint. - repeated Address hidden_envoy_deprecated_listening_addresses = 11 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; -} - -// Metadata provides additional inputs to filters based on matched listeners, -// filter chains, routes and endpoints. It is structured as a map, usually from -// filter name (in reverse DNS format) to metadata specific to the filter. Metadata -// key-values for a filter are merged as connection and request handling occurs, -// with later values for the same key overriding earlier values. -// -// An example use of metadata is providing additional values to -// http_connection_manager in the envoy.http_connection_manager.access_log -// namespace. -// -// Another example use of metadata is to per service config info in cluster metadata, which may get -// consumed by multiple filters. -// -// For load balancing, Metadata provides a means to subset cluster endpoints. -// Endpoints have a Metadata object associated and routes contain a Metadata -// object to match against. There are some well defined metadata used today for -// this purpose: -// -// * ``{"envoy.lb": {"canary": }}`` This indicates the canary status of an -// endpoint and is also used during header processing -// (x-envoy-upstream-canary) and for stats purposes. 
-// [#next-major-version: move to type/metadata/v2] -message Metadata { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Metadata"; - - // Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.* - // namespace is reserved for Envoy's built-in filters. - // If both *filter_metadata* and - // :ref:`typed_filter_metadata ` - // fields are present in the metadata with same keys, - // only *typed_filter_metadata* field will be parsed. - map filter_metadata = 1; - - // Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.* - // namespace is reserved for Envoy's built-in filters. - // The value is encoded as google.protobuf.Any. - // If both :ref:`filter_metadata ` - // and *typed_filter_metadata* fields are present in the metadata with same keys, - // only *typed_filter_metadata* field will be parsed. - map typed_filter_metadata = 2; -} - -// Runtime derived uint32 with a default when not specified. -message RuntimeUInt32 { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.RuntimeUInt32"; - - // Default value if runtime value is not available. - uint32 default_value = 2; - - // Runtime key to get value for comparison. This value is used if defined. - string runtime_key = 3 [(validate.rules).string = {min_len: 1}]; -} - -// Runtime derived percentage with a default when not specified. -message RuntimePercent { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.RuntimePercent"; - - // Default value if runtime value is not available. - type.v3.Percent default_value = 1; - - // Runtime key to get value for comparison. This value is used if defined. - string runtime_key = 2 [(validate.rules).string = {min_len: 1}]; -} - -// Runtime derived double with a default when not specified. 
-message RuntimeDouble { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.RuntimeDouble"; - - // Default value if runtime value is not available. - double default_value = 1; - - // Runtime key to get value for comparison. This value is used if defined. - string runtime_key = 2 [(validate.rules).string = {min_len: 1}]; -} - -// Runtime derived bool with a default when not specified. -message RuntimeFeatureFlag { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.RuntimeFeatureFlag"; - - // Default value if runtime value is not available. - google.protobuf.BoolValue default_value = 1 [(validate.rules).message = {required: true}]; - - // Runtime key to get value for comparison. This value is used if defined. The boolean value must - // be represented via its - // `canonical JSON encoding `_. - string runtime_key = 2 [(validate.rules).string = {min_len: 1}]; -} - -// Header name/value pair. -message HeaderValue { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HeaderValue"; - - // Header name. - string key = 1 - [(validate.rules).string = - {min_len: 1 max_bytes: 16384 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // Header value. - // - // The same :ref:`format specifier ` as used for - // :ref:`HTTP access logging ` applies here, however - // unknown header values are replaced with the empty string instead of `-`. - string value = 2 [ - (validate.rules).string = {max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false} - ]; -} - -// Header name/value pair plus option to control append behavior. -message HeaderValueOption { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.HeaderValueOption"; - - // Header name/value pair that this option applies to. - HeaderValue header = 1 [(validate.rules).message = {required: true}]; - - // Should the value be appended? 
If true (default), the value is appended to - // existing values. Otherwise it replaces any existing values. - google.protobuf.BoolValue append = 2; -} - -// Wrapper for a set of headers. -message HeaderMap { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HeaderMap"; - - repeated HeaderValue headers = 1; -} - -// A directory that is watched for changes, e.g. by inotify on Linux. Move/rename -// events inside this directory trigger the watch. -message WatchedDirectory { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.WatchedDirectory"; - - // Directory path to watch. - string path = 1 [(validate.rules).string = {min_len: 1}]; -} - -// Data source consisting of either a file or an inline value. -message DataSource { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.DataSource"; - - oneof specifier { - option (validate.required) = true; - - // Local filesystem data source. - string filename = 1 [(validate.rules).string = {min_len: 1}]; - - // Bytes inlined in the configuration. - bytes inline_bytes = 2; - - // String inlined in the configuration. - string inline_string = 3; - } -} - -// The message specifies the retry policy of remote data source when fetching fails. -message RetryPolicy { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.RetryPolicy"; - - // Specifies parameters that control :ref:`retry backoff strategy `. - // This parameter is optional, in which case the default base interval is 1000 milliseconds. The - // default maximum interval is 10 times the base interval. - BackoffStrategy retry_back_off = 1; - - // Specifies the allowed number of retries. This parameter is optional and - // defaults to 1. - google.protobuf.UInt32Value max_retries = 2; -} - -// The message specifies how to fetch data from remote and how to verify it. 
-message RemoteDataSource { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.RemoteDataSource"; - - // The HTTP URI to fetch the remote data. - HttpUri http_uri = 1 [(validate.rules).message = {required: true}]; - - // SHA256 string for verifying data. - string sha256 = 2 [(validate.rules).string = {min_len: 1}]; - - // Retry policy for fetching remote data. - RetryPolicy retry_policy = 3; -} - -// Async data source which support async data fetch. -message AsyncDataSource { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.AsyncDataSource"; - - oneof specifier { - option (validate.required) = true; - - // Local async data source. - DataSource local = 1; - - // Remote async data source. - RemoteDataSource remote = 2; - } -} - -// Configuration for transport socket in :ref:`listeners ` and -// :ref:`clusters `. If the configuration is -// empty, a default transport socket implementation and configuration will be -// chosen based on the platform and existence of tls_context. -message TransportSocket { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.TransportSocket"; - - reserved 2; - - reserved "config"; - - // The name of the transport socket to instantiate. The name must match a supported transport - // socket implementation. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Implementation specific configuration which depends on the implementation being instantiated. - // See the supported transport socket implementations for further documentation. - oneof config_type { - google.protobuf.Any typed_config = 3; - } -} - -// Runtime derived FractionalPercent with defaults for when the numerator or denominator is not -// specified via a runtime key. -// -// .. 
note:: -// -// Parsing of the runtime key's data is implemented such that it may be represented as a -// :ref:`FractionalPercent ` proto represented as JSON/YAML -// and may also be represented as an integer with the assumption that the value is an integral -// percentage out of 100. For instance, a runtime key lookup returning the value "42" would parse -// as a `FractionalPercent` whose numerator is 42 and denominator is HUNDRED. -message RuntimeFractionalPercent { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.RuntimeFractionalPercent"; - - // Default value if the runtime value's for the numerator/denominator keys are not available. - type.v3.FractionalPercent default_value = 1 [(validate.rules).message = {required: true}]; - - // Runtime key for a YAML representation of a FractionalPercent. - string runtime_key = 2; -} - -// Identifies a specific ControlPlane instance that Envoy is connected to. -message ControlPlane { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.ControlPlane"; - - // An opaque control plane identifier that uniquely identifies an instance - // of control plane. This can be used to identify which control plane instance, - // the Envoy is connected to. 
- string identifier = 1; -} diff --git a/generated_api_shadow/envoy/config/core/v4alpha/config_source.proto b/generated_api_shadow/envoy/config/core/v4alpha/config_source.proto deleted file mode 100644 index 34f8a8bdb7a26..0000000000000 --- a/generated_api_shadow/envoy/config/core/v4alpha/config_source.proto +++ /dev/null @@ -1,220 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v4alpha; - -import "envoy/config/core/v4alpha/grpc_service.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "xds/core/v3/authority.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; -option java_outer_classname = "ConfigSourceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Configuration sources] - -// xDS API and non-xDS services version. This is used to describe both resource and transport -// protocol versions (in distinct configuration fields). -enum ApiVersion { - // When not specified, we assume v2, to ease migration to Envoy's stable API - // versioning. If a client does not support v2 (e.g. due to deprecation), this - // is an invalid value. - hidden_envoy_deprecated_AUTO = 0 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version_enum) = "3.0"]; - - // Use xDS v2 API. - hidden_envoy_deprecated_V2 = 1 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version_enum) = "3.0"]; - - // Use xDS v3 API. - V3 = 2; -} - -// API configuration source. This identifies the API type and cluster that Envoy -// will use to fetch an xDS API. 
-// [#next-free-field: 9] -message ApiConfigSource { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.ApiConfigSource"; - - // APIs may be fetched via either REST or gRPC. - enum ApiType { - // Ideally this would be 'reserved 0' but one can't reserve the default - // value. Instead we throw an exception if this is ever used. - hidden_envoy_deprecated_DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE = 0 [ - deprecated = true, - (envoy.annotations.disallowed_by_default_enum) = true, - (envoy.annotations.deprecated_at_minor_version_enum) = "3.0" - ]; - - // REST-JSON v2 API. The `canonical JSON encoding - // `_ for - // the v2 protos is used. - REST = 1; - - // SotW gRPC service. - GRPC = 2; - - // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} - // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state - // with every update, the xDS server only sends what has changed since the last update. - DELTA_GRPC = 3; - - // SotW xDS gRPC with ADS. All resources which resolve to this configuration source will be - // multiplexed on a single connection to an ADS endpoint. - // [#not-implemented-hide:] - AGGREGATED_GRPC = 5; - - // Delta xDS gRPC with ADS. All resources which resolve to this configuration source will be - // multiplexed on a single connection to an ADS endpoint. - // [#not-implemented-hide:] - AGGREGATED_DELTA_GRPC = 6; - } - - // API type (gRPC, REST, delta gRPC) - ApiType api_type = 1 [(validate.rules).enum = {defined_only: true}]; - - // API version for xDS transport protocol. This describes the xDS gRPC/REST - // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. - ApiVersion transport_api_version = 8 [(validate.rules).enum = {defined_only: true}]; - - // Cluster names should be used only with REST. If > 1 - // cluster is defined, clusters will be cycled through if any kind of failure - // occurs. - // - // .. 
note:: - // - // The cluster with name ``cluster_name`` must be statically defined and its - // type must not be ``EDS``. - repeated string cluster_names = 2; - - // Multiple gRPC services be provided for GRPC. If > 1 cluster is defined, - // services will be cycled through if any kind of failure occurs. - repeated GrpcService grpc_services = 4; - - // For REST APIs, the delay between successive polls. - google.protobuf.Duration refresh_delay = 3; - - // For REST APIs, the request timeout. If not set, a default value of 1s will be used. - google.protobuf.Duration request_timeout = 5 [(validate.rules).duration = {gt {}}]; - - // For GRPC APIs, the rate limit settings. If present, discovery requests made by Envoy will be - // rate limited. - RateLimitSettings rate_limit_settings = 6; - - // Skip the node identifier in subsequent discovery requests for streaming gRPC config types. - bool set_node_on_first_message_only = 7; -} - -// Aggregated Discovery Service (ADS) options. This is currently empty, but when -// set in :ref:`ConfigSource ` can be used to -// specify that ADS is to be used. -message AggregatedConfigSource { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.AggregatedConfigSource"; -} - -// [#not-implemented-hide:] -// Self-referencing config source options. This is currently empty, but when -// set in :ref:`ConfigSource ` can be used to -// specify that other data can be obtained from the same server. -message SelfConfigSource { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.SelfConfigSource"; - - // API version for xDS transport protocol. This describes the xDS gRPC/REST - // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. - ApiVersion transport_api_version = 1 [(validate.rules).enum = {defined_only: true}]; -} - -// Rate Limit settings to be applied for discovery requests made by Envoy. 
-message RateLimitSettings { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.RateLimitSettings"; - - // Maximum number of tokens to be used for rate limiting discovery request calls. If not set, a - // default value of 100 will be used. - google.protobuf.UInt32Value max_tokens = 1; - - // Rate at which tokens will be filled per second. If not set, a default fill rate of 10 tokens - // per second will be used. - google.protobuf.DoubleValue fill_rate = 2 [(validate.rules).double = {gt: 0.0}]; -} - -// Configuration for :ref:`listeners `, :ref:`clusters -// `, :ref:`routes -// `, :ref:`endpoints -// ` etc. may either be sourced from the -// filesystem or from an xDS API source. Filesystem configs are watched with -// inotify for updates. -// [#next-free-field: 8] -message ConfigSource { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.ConfigSource"; - - // Authorities that this config source may be used for. An authority specified in a xdstp:// URL - // is resolved to a *ConfigSource* prior to configuration fetch. This field provides the - // association between authority name and configuration source. - // [#not-implemented-hide:] - repeated xds.core.v3.Authority authorities = 7; - - oneof config_source_specifier { - option (validate.required) = true; - - // Path on the filesystem to source and watch for configuration updates. - // When sourcing configuration for :ref:`secret `, - // the certificate and key files are also watched for updates. - // - // .. note:: - // - // The path to the source must exist at config load time. - // - // .. note:: - // - // Envoy will only watch the file path for *moves.* This is because in general only moves - // are atomic. The same method of swapping files as is demonstrated in the - // :ref:`runtime documentation ` can be used here also. - string path = 1; - - // API configuration source. 
- ApiConfigSource api_config_source = 2; - - // When set, ADS will be used to fetch resources. The ADS API configuration - // source in the bootstrap configuration is used. - AggregatedConfigSource ads = 3; - - // [#not-implemented-hide:] - // When set, the client will access the resources from the same server it got the - // ConfigSource from, although not necessarily from the same stream. This is similar to the - // :ref:`ads` field, except that the client may use a - // different stream to the same server. As a result, this field can be used for things - // like LRS that cannot be sent on an ADS stream. It can also be used to link from (e.g.) - // LDS to RDS on the same server without requiring the management server to know its name - // or required credentials. - // [#next-major-version: In xDS v3, consider replacing the ads field with this one, since - // this field can implicitly mean to use the same stream in the case where the ConfigSource - // is provided via ADS and the specified data can also be obtained via ADS.] - SelfConfigSource self = 5; - } - - // When this timeout is specified, Envoy will wait no longer than the specified time for first - // config response on this xDS subscription during the :ref:`initialization process - // `. After reaching the timeout, Envoy will move to the next - // initialization phase, even if the first config is not delivered yet. The timer is activated - // when the xDS API subscription starts, and is disarmed on first config update or on error. 0 - // means no timeout - Envoy will wait indefinitely for the first xDS config (unless another - // timeout applies). The default is 15s. - google.protobuf.Duration initial_fetch_timeout = 4; - - // API version for xDS resources. This implies the type URLs that the client - // will request for resources and the resource type that the client will in - // turn expect to be delivered. 
- ApiVersion resource_api_version = 6 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/generated_api_shadow/envoy/config/core/v4alpha/event_service_config.proto b/generated_api_shadow/envoy/config/core/v4alpha/event_service_config.proto deleted file mode 100644 index a0b4e5590d1d5..0000000000000 --- a/generated_api_shadow/envoy/config/core/v4alpha/event_service_config.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v4alpha; - -import "envoy/config/core/v4alpha/grpc_service.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; -option java_outer_classname = "EventServiceConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#not-implemented-hide:] -// Configuration of the event reporting service endpoint. -message EventServiceConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.EventServiceConfig"; - - oneof config_source_specifier { - option (validate.required) = true; - - // Specifies the gRPC service that hosts the event reporting service. 
- GrpcService grpc_service = 1; - } -} diff --git a/generated_api_shadow/envoy/config/core/v4alpha/extension.proto b/generated_api_shadow/envoy/config/core/v4alpha/extension.proto deleted file mode 100644 index 4de107580d072..0000000000000 --- a/generated_api_shadow/envoy/config/core/v4alpha/extension.proto +++ /dev/null @@ -1,68 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v4alpha; - -import "envoy/config/core/v4alpha/config_source.proto"; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; -option java_outer_classname = "ExtensionProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Extension configuration] - -// Message type for extension configuration. -// [#next-major-version: revisit all existing typed_config that doesn't use this wrapper.]. -message TypedExtensionConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.TypedExtensionConfig"; - - // The name of an extension. This is not used to select the extension, instead - // it serves the role of an opaque identifier. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // The typed config for the extension. The type URL will be used to identify - // the extension. In the case that the type URL is *udpa.type.v1.TypedStruct*, - // the inner type URL of *TypedStruct* will be utilized. See the - // :ref:`extension configuration overview - // ` for further details. - google.protobuf.Any typed_config = 2 [(validate.rules).any = {required: true}]; -} - -// Configuration source specifier for a late-bound extension configuration. 
The -// parent resource is warmed until all the initial extension configurations are -// received, unless the flag to apply the default configuration is set. -// Subsequent extension updates are atomic on a per-worker basis. Once an -// extension configuration is applied to a request or a connection, it remains -// constant for the duration of processing. If the initial delivery of the -// extension configuration fails, due to a timeout for example, the optional -// default configuration is applied. Without a default configuration, the -// extension is disabled, until an extension configuration is received. The -// behavior of a disabled extension depends on the context. For example, a -// filter chain with a disabled extension filter rejects all incoming streams. -message ExtensionConfigSource { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.ExtensionConfigSource"; - - ConfigSource config_source = 1 [(validate.rules).any = {required: true}]; - - // Optional default configuration to use as the initial configuration if - // there is a failure to receive the initial extension configuration or if - // `apply_default_config_without_warming` flag is set. - google.protobuf.Any default_config = 2; - - // Use the default config as the initial configuration without warming and - // waiting for the first discovery response. Requires the default configuration - // to be supplied. - bool apply_default_config_without_warming = 3; - - // A set of permitted extension type URLs. Extension configuration updates are rejected - // if they do not match any type URL in the set. 
- repeated string type_urls = 4 [(validate.rules).repeated = {min_items: 1}]; -} diff --git a/generated_api_shadow/envoy/config/core/v4alpha/grpc_method_list.proto b/generated_api_shadow/envoy/config/core/v4alpha/grpc_method_list.proto deleted file mode 100644 index 371ea32c10f3a..0000000000000 --- a/generated_api_shadow/envoy/config/core/v4alpha/grpc_method_list.proto +++ /dev/null @@ -1,33 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v4alpha; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; -option java_outer_classname = "GrpcMethodListProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: gRPC method list] - -// A list of gRPC methods which can be used as an allowlist, for example. -message GrpcMethodList { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.GrpcMethodList"; - - message Service { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.GrpcMethodList.Service"; - - // The name of the gRPC service. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // The names of the gRPC methods in this service. 
- repeated string method_names = 2 [(validate.rules).repeated = {min_items: 1}]; - } - - repeated Service services = 1; -} diff --git a/generated_api_shadow/envoy/config/core/v4alpha/grpc_service.proto b/generated_api_shadow/envoy/config/core/v4alpha/grpc_service.proto deleted file mode 100644 index 973983386c2e8..0000000000000 --- a/generated_api_shadow/envoy/config/core/v4alpha/grpc_service.proto +++ /dev/null @@ -1,302 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/sensitive.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; -option java_outer_classname = "GrpcServiceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: gRPC services] - -// gRPC service configuration. This is used by :ref:`ApiConfigSource -// ` and filter configurations. -// [#next-free-field: 6] -message GrpcService { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.GrpcService"; - - message EnvoyGrpc { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.GrpcService.EnvoyGrpc"; - - // The name of the upstream gRPC cluster. SSL credentials will be supplied - // in the :ref:`Cluster ` :ref:`transport_socket - // `. - string cluster_name = 1 [(validate.rules).string = {min_len: 1}]; - - // The `:authority` header in the grpc request. If this field is not set, the authority header value will be `cluster_name`. - // Note that this authority does not override the SNI. 
The SNI is provided by the transport socket of the cluster. - string authority = 2 - [(validate.rules).string = - {min_len: 0 max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}]; - } - - // [#next-free-field: 9] - message GoogleGrpc { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.GrpcService.GoogleGrpc"; - - // See https://grpc.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html. - message SslCredentials { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials"; - - // PEM encoded server root certificates. - DataSource root_certs = 1; - - // PEM encoded client private key. - DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; - - // PEM encoded client certificate chain. - DataSource cert_chain = 3; - } - - // Local channel credentials. Only UDS is supported for now. - // See https://github.com/grpc/grpc/pull/15909. - message GoogleLocalCredentials { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.GrpcService.GoogleGrpc.GoogleLocalCredentials"; - } - - // See https://grpc.io/docs/guides/auth.html#credential-types to understand Channel and Call - // credential types. 
- message ChannelCredentials { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials"; - - oneof credential_specifier { - option (validate.required) = true; - - SslCredentials ssl_credentials = 1; - - // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 - google.protobuf.Empty google_default = 2; - - GoogleLocalCredentials local_credentials = 3; - } - } - - // [#next-free-field: 8] - message CallCredentials { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials"; - - message ServiceAccountJWTAccessCredentials { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials." - "ServiceAccountJWTAccessCredentials"; - - string json_key = 1; - - uint64 token_lifetime_seconds = 2; - } - - message GoogleIAMCredentials { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.GoogleIAMCredentials"; - - string authorization_token = 1; - - string authority_selector = 2; - } - - message MetadataCredentialsFromPlugin { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials." - "MetadataCredentialsFromPlugin"; - - reserved 2; - - reserved "config"; - - string name = 1; - - // [#extension-category: envoy.grpc_credentials] - oneof config_type { - google.protobuf.Any typed_config = 3; - } - } - - // Security token service configuration that allows Google gRPC to - // fetch security token from an OAuth 2.0 authorization server. - // See https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 and - // https://github.com/grpc/grpc/pull/19587. 
- // [#next-free-field: 10] - message StsService { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.StsService"; - - // URI of the token exchange service that handles token exchange requests. - // [#comment:TODO(asraa): Add URI validation when implemented. Tracked by - // https://github.com/envoyproxy/protoc-gen-validate/issues/303] - string token_exchange_service_uri = 1; - - // Location of the target service or resource where the client - // intends to use the requested security token. - string resource = 2; - - // Logical name of the target service where the client intends to - // use the requested security token. - string audience = 3; - - // The desired scope of the requested security token in the - // context of the service or resource where the token will be used. - string scope = 4; - - // Type of the requested security token. - string requested_token_type = 5; - - // The path of subject token, a security token that represents the - // identity of the party on behalf of whom the request is being made. - string subject_token_path = 6 [(validate.rules).string = {min_len: 1}]; - - // Type of the subject token. - string subject_token_type = 7 [(validate.rules).string = {min_len: 1}]; - - // The path of actor token, a security token that represents the identity - // of the acting party. The acting party is authorized to use the - // requested security token and act on behalf of the subject. - string actor_token_path = 8; - - // Type of the actor token. - string actor_token_type = 9; - } - - oneof credential_specifier { - option (validate.required) = true; - - // Access token credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#ad3a80da696ffdaea943f0f858d7a360d. - string access_token = 1; - - // Google Compute Engine credentials. 
- // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 - google.protobuf.Empty google_compute_engine = 2; - - // Google refresh token credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#a96901c997b91bc6513b08491e0dca37c. - string google_refresh_token = 3; - - // Service Account JWT Access credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#a92a9f959d6102461f66ee973d8e9d3aa. - ServiceAccountJWTAccessCredentials service_account_jwt_access = 4; - - // Google IAM credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#a9fc1fc101b41e680d47028166e76f9d0. - GoogleIAMCredentials google_iam = 5; - - // Custom authenticator credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#a823c6a4b19ffc71fb33e90154ee2ad07. - // https://grpc.io/docs/guides/auth.html#extending-grpc-to-support-other-authentication-mechanisms. - MetadataCredentialsFromPlugin from_plugin = 6; - - // Custom security token service which implements OAuth 2.0 token exchange. - // https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 - // See https://github.com/grpc/grpc/pull/19587. - StsService sts_service = 7; - } - } - - // Channel arguments. - message ChannelArgs { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs"; - - message Value { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.Value"; - - // Pointer values are not supported, since they don't make any sense when - // delivered via the API. - oneof value_specifier { - option (validate.required) = true; - - string string_value = 1; - - int64 int_value = 2; - } - } - - // See grpc_types.h GRPC_ARG #defines for keys that work here. - map args = 1; - } - - // The target URI when using the `Google C++ gRPC client - // `_. SSL credentials will be supplied in - // :ref:`channel_credentials `. 
- string target_uri = 1 [(validate.rules).string = {min_len: 1}]; - - ChannelCredentials channel_credentials = 2; - - // A set of call credentials that can be composed with `channel credentials - // `_. - repeated CallCredentials call_credentials = 3; - - // The human readable prefix to use when emitting statistics for the gRPC - // service. - // - // .. csv-table:: - // :header: Name, Type, Description - // :widths: 1, 1, 2 - // - // streams_total, Counter, Total number of streams opened - // streams_closed_, Counter, Total streams closed with - string stat_prefix = 4 [(validate.rules).string = {min_len: 1}]; - - // The name of the Google gRPC credentials factory to use. This must have been registered with - // Envoy. If this is empty, a default credentials factory will be used that sets up channel - // credentials based on other configuration parameters. - string credentials_factory_name = 5; - - // Additional configuration for site-specific customizations of the Google - // gRPC library. - google.protobuf.Struct config = 6; - - // How many bytes each stream can buffer internally. - // If not set an implementation defined default is applied (1MiB). - google.protobuf.UInt32Value per_stream_buffer_limit_bytes = 7; - - // Custom channels args. - ChannelArgs channel_args = 8; - } - - reserved 4; - - oneof target_specifier { - option (validate.required) = true; - - // Envoy's in-built gRPC client. - // See the :ref:`gRPC services overview ` - // documentation for discussion on gRPC client selection. - EnvoyGrpc envoy_grpc = 1; - - // `Google C++ gRPC client `_ - // See the :ref:`gRPC services overview ` - // documentation for discussion on gRPC client selection. - GoogleGrpc google_grpc = 2; - } - - // The timeout for the gRPC request. This is the timeout for a specific - // request. - google.protobuf.Duration timeout = 3; - - // Additional metadata to include in streams initiated to the GrpcService. 
This can be used for - // scenarios in which additional ad hoc authorization headers (e.g. ``x-foo-bar: baz-key``) are to - // be injected. For more information, including details on header value syntax, see the - // documentation on :ref:`custom request headers - // `. - repeated HeaderValue initial_metadata = 5; -} diff --git a/generated_api_shadow/envoy/config/core/v4alpha/health_check.proto b/generated_api_shadow/envoy/config/core/v4alpha/health_check.proto deleted file mode 100644 index bf86f26e665e3..0000000000000 --- a/generated_api_shadow/envoy/config/core/v4alpha/health_check.proto +++ /dev/null @@ -1,372 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/event_service_config.proto"; -import "envoy/type/matcher/v4alpha/string.proto"; -import "envoy/type/v3/http.proto"; -import "envoy/type/v3/range.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; -option java_outer_classname = "HealthCheckProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Health check] -// * Health checking :ref:`architecture overview `. -// * If health checking is configured for a cluster, additional statistics are emitted. They are -// documented :ref:`here `. - -// Endpoint health status. -enum HealthStatus { - // The health status is not known. This is interpreted by Envoy as *HEALTHY*. - UNKNOWN = 0; - - // Healthy. - HEALTHY = 1; - - // Unhealthy. - UNHEALTHY = 2; - - // Connection draining in progress. E.g., - // ``_ - // or - // ``_. 
- // This is interpreted by Envoy as *UNHEALTHY*. - DRAINING = 3; - - // Health check timed out. This is part of HDS and is interpreted by Envoy as - // *UNHEALTHY*. - TIMEOUT = 4; - - // Degraded. - DEGRADED = 5; -} - -// [#next-free-field: 25] -message HealthCheck { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HealthCheck"; - - // Describes the encoding of the payload bytes in the payload. - message Payload { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.HealthCheck.Payload"; - - oneof payload { - option (validate.required) = true; - - // Hex encoded payload. E.g., "000000FF". - string text = 1 [(validate.rules).string = {min_len: 1}]; - - // [#not-implemented-hide:] Binary payload. - bytes binary = 2; - } - } - - // [#next-free-field: 12] - message HttpHealthCheck { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.HealthCheck.HttpHealthCheck"; - - reserved 5, 7; - - reserved "service_name", "use_http2"; - - // The value of the host header in the HTTP health check request. If - // left empty (default value), the name of the cluster this health check is associated - // with will be used. The host header can be customized for a specific endpoint by setting the - // :ref:`hostname ` field. - string host = 1 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Specifies the HTTP path that will be requested during health checking. For example - // */healthcheck*. - string path = 2 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // [#not-implemented-hide:] HTTP specific payload. - Payload send = 3; - - // [#not-implemented-hide:] HTTP specific response. - Payload receive = 4; - - // Specifies a list of HTTP headers that should be added to each request that is sent to the - // health checked cluster. 
For more information, including details on header value syntax, see - // the documentation on :ref:`custom request headers - // `. - repeated HeaderValueOption request_headers_to_add = 6 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each request that is sent to the - // health checked cluster. - repeated string request_headers_to_remove = 8 [(validate.rules).repeated = { - items {string {well_known_regex: HTTP_HEADER_NAME strict: false}} - }]; - - // Specifies a list of HTTP response statuses considered healthy. If provided, replaces default - // 200-only policy - 200 must be included explicitly as needed. Ranges follow half-open - // semantics of :ref:`Int64Range `. The start and end of each - // range are required. Only statuses in the range [100, 600) are allowed. - repeated type.v3.Int64Range expected_statuses = 9; - - // Use specified application protocol for health checks. - type.v3.CodecClientType codec_client_type = 10 [(validate.rules).enum = {defined_only: true}]; - - // An optional service name parameter which is used to validate the identity of - // the health checked cluster using a :ref:`StringMatcher - // `. See the :ref:`architecture overview - // ` for more information. - type.matcher.v4alpha.StringMatcher service_name_matcher = 11; - } - - message TcpHealthCheck { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.HealthCheck.TcpHealthCheck"; - - // Empty payloads imply a connect-only health check. - Payload send = 1; - - // When checking the response, “fuzzy” matching is performed such that each - // binary block must be found, and in the order specified, but not - // necessarily contiguous. - repeated Payload receive = 2; - } - - message RedisHealthCheck { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.HealthCheck.RedisHealthCheck"; - - // If set, optionally perform ``EXISTS `` instead of ``PING``. 
A return value - // from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other - // than 0 is considered a failure. This allows the user to mark a Redis instance for maintenance - // by setting the specified key to any value and waiting for traffic to drain. - string key = 1; - } - - // `grpc.health.v1.Health - // `_-based - // healthcheck. See `gRPC doc `_ - // for details. - message GrpcHealthCheck { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.HealthCheck.GrpcHealthCheck"; - - // An optional service name parameter which will be sent to gRPC service in - // `grpc.health.v1.HealthCheckRequest - // `_. - // message. See `gRPC health-checking overview - // `_ for more information. - string service_name = 1; - - // The value of the :authority header in the gRPC health check request. If - // left empty (default value), the name of the cluster this health check is associated - // with will be used. The authority header can be customized for a specific endpoint by setting - // the :ref:`hostname ` field. - string authority = 2 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - } - - // Custom health check. - message CustomHealthCheck { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.HealthCheck.CustomHealthCheck"; - - reserved 2; - - reserved "config"; - - // The registered name of the custom health checker. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // A custom health checker specific configuration which depends on the custom health checker - // being instantiated. See :api:`envoy/config/health_checker` for reference. - // [#extension-category: envoy.health_checkers] - oneof config_type { - google.protobuf.Any typed_config = 3; - } - } - - // Health checks occur over the transport socket specified for the cluster. 
This implies that if a - // cluster is using a TLS-enabled transport socket, the health check will also occur over TLS. - // - // This allows overriding the cluster TLS settings, just for health check connections. - message TlsOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.HealthCheck.TlsOptions"; - - // Specifies the ALPN protocols for health check connections. This is useful if the - // corresponding upstream is using ALPN-based :ref:`FilterChainMatch - // ` along with different protocols for health checks - // versus data connections. If empty, no ALPN protocols will be set on health check connections. - repeated string alpn_protocols = 1; - } - - reserved 10; - - // The time to wait for a health check response. If the timeout is reached the - // health check attempt will be considered a failure. - google.protobuf.Duration timeout = 1 [(validate.rules).duration = { - required: true - gt {} - }]; - - // The interval between health checks. - google.protobuf.Duration interval = 2 [(validate.rules).duration = { - required: true - gt {} - }]; - - // An optional jitter amount in milliseconds. If specified, Envoy will start health - // checking after for a random time in ms between 0 and initial_jitter. This only - // applies to the first health check. - google.protobuf.Duration initial_jitter = 20; - - // An optional jitter amount in milliseconds. If specified, during every - // interval Envoy will add interval_jitter to the wait time. - google.protobuf.Duration interval_jitter = 3; - - // An optional jitter amount as a percentage of interval_ms. If specified, - // during every interval Envoy will add interval_ms * - // interval_jitter_percent / 100 to the wait time. - // - // If interval_jitter_ms and interval_jitter_percent are both set, both of - // them will be used to increase the wait time. 
- uint32 interval_jitter_percent = 18; - - // The number of unhealthy health checks required before a host is marked - // unhealthy. Note that for *http* health checking if a host responds with 503 - // this threshold is ignored and the host is considered unhealthy immediately. - google.protobuf.UInt32Value unhealthy_threshold = 4 [(validate.rules).message = {required: true}]; - - // The number of healthy health checks required before a host is marked - // healthy. Note that during startup, only a single successful health check is - // required to mark a host healthy. - google.protobuf.UInt32Value healthy_threshold = 5 [(validate.rules).message = {required: true}]; - - // [#not-implemented-hide:] Non-serving port for health checking. - google.protobuf.UInt32Value alt_port = 6; - - // Reuse health check connection between health checks. Default is true. - google.protobuf.BoolValue reuse_connection = 7; - - oneof health_checker { - option (validate.required) = true; - - // HTTP health check. - HttpHealthCheck http_health_check = 8; - - // TCP health check. - TcpHealthCheck tcp_health_check = 9; - - // gRPC health check. - GrpcHealthCheck grpc_health_check = 11; - - // Custom health check. - CustomHealthCheck custom_health_check = 13; - } - - // The "no traffic interval" is a special health check interval that is used when a cluster has - // never had traffic routed to it. This lower interval allows cluster information to be kept up to - // date, without sending a potentially large amount of active health checking traffic for no - // reason. Once a cluster has been used for traffic routing, Envoy will shift back to using the - // standard health check interval that is defined. Note that this interval takes precedence over - // any other. - // - // The default value for "no traffic interval" is 60 seconds. 
- google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration = {gt {}}]; - - // The "no traffic healthy interval" is a special health check interval that - // is used for hosts that are currently passing active health checking - // (including new hosts) when the cluster has received no traffic. - // - // This is useful for when we want to send frequent health checks with - // `no_traffic_interval` but then revert to lower frequency `no_traffic_healthy_interval` once - // a host in the cluster is marked as healthy. - // - // Once a cluster has been used for traffic routing, Envoy will shift back to using the - // standard health check interval that is defined. - // - // If no_traffic_healthy_interval is not set, it will default to the - // no traffic interval and send that interval regardless of health state. - google.protobuf.Duration no_traffic_healthy_interval = 24 [(validate.rules).duration = {gt {}}]; - - // The "unhealthy interval" is a health check interval that is used for hosts that are marked as - // unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the - // standard health check interval that is defined. - // - // The default value for "unhealthy interval" is the same as "interval". - google.protobuf.Duration unhealthy_interval = 14 [(validate.rules).duration = {gt {}}]; - - // The "unhealthy edge interval" is a special health check interval that is used for the first - // health check right after a host is marked as unhealthy. For subsequent health checks - // Envoy will shift back to using either "unhealthy interval" if present or the standard health - // check interval that is defined. - // - // The default value for "unhealthy edge interval" is the same as "unhealthy interval". 
- google.protobuf.Duration unhealthy_edge_interval = 15 [(validate.rules).duration = {gt {}}]; - - // The "healthy edge interval" is a special health check interval that is used for the first - // health check right after a host is marked as healthy. For subsequent health checks - // Envoy will shift back to using the standard health check interval that is defined. - // - // The default value for "healthy edge interval" is the same as the default interval. - google.protobuf.Duration healthy_edge_interval = 16 [(validate.rules).duration = {gt {}}]; - - // Specifies the path to the :ref:`health check event log `. - // If empty, no event log will be written. - string event_log_path = 17; - - // [#not-implemented-hide:] - // The gRPC service for the health check event service. - // If empty, health check events won't be sent to a remote endpoint. - EventServiceConfig event_service = 22; - - // If set to true, health check failure events will always be logged. If set to false, only the - // initial health check failure event will be logged. - // The default value is false. - bool always_log_health_check_failures = 19; - - // This allows overriding the cluster TLS settings, just for health check connections. - TlsOptions tls_options = 21; - - // Optional key/value pairs that will be used to match a transport socket from those specified in the cluster's - // :ref:`tranport socket matches `. - // For example, the following match criteria - // - // .. code-block:: yaml - // - // transport_socket_match_criteria: - // useMTLS: true - // - // Will match the following :ref:`cluster socket match ` - // - // .. code-block:: yaml - // - // transport_socket_matches: - // - name: "useMTLS" - // match: - // useMTLS: true - // transport_socket: - // name: envoy.transport_sockets.tls - // config: { ... } # tls socket configuration - // - // If this field is set, then for health checks it will supersede an entry of *envoy.transport_socket* in the - // :ref:`LbEndpoint.Metadata `. 
- // This allows using different transport socket capabilities for health checking versus proxying to the - // endpoint. - // - // If the key/values pairs specified do not match any - // :ref:`transport socket matches `, - // the cluster's :ref:`transport socket ` - // will be used for health check socket configuration. - google.protobuf.Struct transport_socket_match_criteria = 23; -} diff --git a/generated_api_shadow/envoy/config/core/v4alpha/http_uri.proto b/generated_api_shadow/envoy/config/core/v4alpha/http_uri.proto deleted file mode 100644 index ae1c0c9a3d4eb..0000000000000 --- a/generated_api_shadow/envoy/config/core/v4alpha/http_uri.proto +++ /dev/null @@ -1,56 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v4alpha; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; -option java_outer_classname = "HttpUriProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: HTTP Service URI ] - -// Envoy external URI descriptor -message HttpUri { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HttpUri"; - - // The HTTP server URI. It should be a full FQDN with protocol, host and path. - // - // Example: - // - // .. code-block:: yaml - // - // uri: https://www.googleapis.com/oauth2/v1/certs - // - string uri = 1 [(validate.rules).string = {min_len: 1}]; - - // Specify how `uri` is to be fetched. Today, this requires an explicit - // cluster, but in the future we may support dynamic cluster creation or - // inline DNS resolution. See `issue - // `_. - oneof http_upstream_type { - option (validate.required) = true; - - // A cluster is created in the Envoy "cluster_manager" config - // section. This field specifies the cluster name. 
- // - // Example: - // - // .. code-block:: yaml - // - // cluster: jwks_cluster - // - string cluster = 2 [(validate.rules).string = {min_len: 1}]; - } - - // Sets the maximum duration in milliseconds that a response can take to arrive upon request. - google.protobuf.Duration timeout = 3 [(validate.rules).duration = { - required: true - gte {} - }]; -} diff --git a/generated_api_shadow/envoy/config/core/v4alpha/protocol.proto b/generated_api_shadow/envoy/config/core/v4alpha/protocol.proto deleted file mode 100644 index f99ae27f14392..0000000000000 --- a/generated_api_shadow/envoy/config/core/v4alpha/protocol.proto +++ /dev/null @@ -1,509 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v4alpha; - -import "envoy/config/core/v4alpha/extension.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; -option java_outer_classname = "ProtocolProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Protocol options] - -// [#not-implemented-hide:] -message TcpProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.TcpProtocolOptions"; -} - -// QUIC protocol options which apply to both downstream and upstream connections. -message QuicProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.QuicProtocolOptions"; - - // Maximum number of streams that the client can negotiate per connection. 100 - // if not specified. - google.protobuf.UInt32Value max_concurrent_streams = 1; - - // `Initial stream-level flow-control receive window - // `_ size. 
Valid values range from - // 1 to 16777216 (2^24, maximum supported by QUICHE) and defaults to 65536 (2^16). - // - // NOTE: 16384 (2^14) is the minimum window size supported in Google QUIC. If configured smaller than it, we will use 16384 instead. - // QUICHE IETF Quic implementation supports 1 bytes window. We only support increasing the default window size now, so it's also the minimum. - // - // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the - // QUIC stream send and receive buffers. Once the buffer reaches this pointer, watermark callbacks will fire to - // stop the flow of data to the stream buffers. - google.protobuf.UInt32Value initial_stream_window_size = 2 - [(validate.rules).uint32 = {lte: 16777216 gte: 1}]; - - // Similar to *initial_stream_window_size*, but for connection-level - // flow-control. Valid values rage from 1 to 25165824 (24MB, maximum supported by QUICHE) and defaults to 65536 (2^16). - // window. Currently, this has the same minimum/default as *initial_stream_window_size*. - // - // NOTE: 16384 (2^14) is the minimum window size supported in Google QUIC. We only support increasing the default - // window size now, so it's also the minimum. - google.protobuf.UInt32Value initial_connection_window_size = 3 - [(validate.rules).uint32 = {lte: 25165824 gte: 1}]; -} - -message UpstreamHttpProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.UpstreamHttpProtocolOptions"; - - // Set transport socket `SNI `_ for new - // upstream connections based on the downstream HTTP host/authority header, as seen by the - // :ref:`router filter `. - bool auto_sni = 1; - - // Automatic validate upstream presented certificate for new upstream connections based on the - // downstream HTTP host/authority header, as seen by the - // :ref:`router filter `. - // This field is intended to set with `auto_sni` field. 
- bool auto_san_validation = 2; -} - -// Configures the alternate protocols cache which tracks alternate protocols that can be used to -// make an HTTP connection to an origin server. See https://tools.ietf.org/html/rfc7838 for -// HTTP Alternate Services and https://datatracker.ietf.org/doc/html/draft-ietf-dnsop-svcb-https-04 -// for the "HTTPS" DNS resource record. -message AlternateProtocolsCacheOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.AlternateProtocolsCacheOptions"; - - // The name of the cache. Multiple named caches allow independent alternate protocols cache - // configurations to operate within a single Envoy process using different configurations. All - // alternate protocols cache options with the same name *must* be equal in all fields when - // referenced from different configuration components. Configuration will fail to load if this is - // not the case. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // The maximum number of entries that the cache will hold. If not specified defaults to 1024. - // - // .. note: - // - // The implementation is approximate and enforced independently on each worker thread, thus - // it is possible for the maximum entries in the cache to go slightly above the configured - // value depending on timing. This is similar to how other circuit breakers work. - google.protobuf.UInt32Value max_entries = 2 [(validate.rules).uint32 = {gt: 0}]; -} - -// [#next-free-field: 7] -message HttpProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.HttpProtocolOptions"; - - // Action to take when Envoy receives client request with header names containing underscore - // characters. - // Underscore character is allowed in header names by the RFC-7230 and this behavior is implemented - // as a security measure due to systems that treat '_' and '-' as interchangeable. 
Envoy by default allows client request headers with underscore - // characters. - enum HeadersWithUnderscoresAction { - // Allow headers with underscores. This is the default behavior. - ALLOW = 0; - - // Reject client request. HTTP/1 requests are rejected with the 400 status. HTTP/2 requests - // end with the stream reset. The "httpN.requests_rejected_with_underscores_in_headers" counter - // is incremented for each rejected request. - REJECT_REQUEST = 1; - - // Drop the header with name containing underscores. The header is dropped before the filter chain is - // invoked and as such filters will not see dropped headers. The - // "httpN.dropped_headers_with_underscores" is incremented for each dropped header. - DROP_HEADER = 2; - } - - // The idle timeout for connections. The idle timeout is defined as the - // period in which there are no active requests. When the - // idle timeout is reached the connection will be closed. If the connection is an HTTP/2 - // downstream connection a drain sequence will occur prior to closing the connection, see - // :ref:`drain_timeout - // `. - // Note that request based timeouts mean that HTTP/2 PINGs will not keep the connection alive. - // If not specified, this defaults to 1 hour. To disable idle timeouts explicitly set this to 0. - // - // .. warning:: - // Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP - // FIN packets, etc. - // - // If the :ref:`overload action ` "envoy.overload_actions.reduce_timeouts" - // is configured, this timeout is scaled for downstream connections according to the value for - // :ref:`HTTP_DOWNSTREAM_CONNECTION_IDLE `. - google.protobuf.Duration idle_timeout = 1; - - // The maximum duration of a connection. The duration is defined as a period since a connection - // was established. If not set, there is no max duration. When max_connection_duration is reached - // the connection will be closed. 
Drain sequence will occur prior to closing the connection if - // if's applicable. See :ref:`drain_timeout - // `. - // Note: not implemented for upstream connections. - google.protobuf.Duration max_connection_duration = 3; - - // The maximum number of headers. If unconfigured, the default - // maximum number of request headers allowed is 100. Requests that exceed this limit will receive - // a 431 response for HTTP/1.x and cause a stream reset for HTTP/2. - google.protobuf.UInt32Value max_headers_count = 2 [(validate.rules).uint32 = {gte: 1}]; - - // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be - // reset independent of any other timeouts. If not specified, this value is not set. - google.protobuf.Duration max_stream_duration = 4; - - // Action to take when a client request with a header name containing underscore characters is received. - // If this setting is not specified, the value defaults to ALLOW. - // Note: upstream responses are not affected by this setting. - HeadersWithUnderscoresAction headers_with_underscores_action = 5; - - // Optional maximum requests for both upstream and downstream connections. - // If not specified, there is no limit. - // Setting this parameter to 1 will effectively disable keep alive. - // For HTTP/2 and HTTP/3, due to concurrent stream processing, the limit is approximate. 
- google.protobuf.UInt32Value max_requests_per_connection = 6; -} - -// [#next-free-field: 8] -message Http1ProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.Http1ProtocolOptions"; - - // [#next-free-field: 9] - message HeaderKeyFormat { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat"; - - message ProperCaseWords { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.ProperCaseWords"; - } - - oneof header_format { - option (validate.required) = true; - - // Formats the header by proper casing words: the first character and any character following - // a special character will be capitalized if it's an alpha character. For example, - // "content-type" becomes "Content-Type", and "foo$b#$are" becomes "Foo$B#$Are". - // Note that while this results in most headers following conventional casing, certain headers - // are not covered. For example, the "TE" header will be formatted as "Te". - ProperCaseWords proper_case_words = 1; - - // Configuration for stateful formatter extensions that allow using received headers to - // affect the output of encoding headers. E.g., preserving case during proxying. - // [#extension-category: envoy.http.stateful_header_formatters] - TypedExtensionConfig stateful_formatter = 8; - } - } - - // Handle HTTP requests with absolute URLs in the requests. These requests - // are generally sent by clients to forward/explicit proxies. This allows clients to configure - // envoy as their HTTP proxy. In Unix, for example, this is typically done by setting the - // *http_proxy* environment variable. - google.protobuf.BoolValue allow_absolute_url = 1; - - // Handle incoming HTTP/1.0 and HTTP 0.9 requests. - // This is off by default, and not fully standards compliant. 
There is support for pre-HTTP/1.1 - // style connect logic, dechunking, and handling lack of client host iff - // *default_host_for_http_10* is configured. - bool accept_http_10 = 2; - - // A default host for HTTP/1.0 requests. This is highly suggested if *accept_http_10* is true as - // Envoy does not otherwise support HTTP/1.0 without a Host header. - // This is a no-op if *accept_http_10* is not true. - string default_host_for_http_10 = 3; - - // Describes how the keys for response headers should be formatted. By default, all header keys - // are lower cased. - HeaderKeyFormat header_key_format = 4; - - // Enables trailers for HTTP/1. By default the HTTP/1 codec drops proxied trailers. - // - // .. attention:: - // - // Note that this only happens when Envoy is chunk encoding which occurs when: - // - The request is HTTP/1.1. - // - Is neither a HEAD only request nor a HTTP Upgrade. - // - Not a response to a HEAD request. - // - The content length header is not present. - bool enable_trailers = 5; - - // Allows Envoy to process requests/responses with both `Content-Length` and `Transfer-Encoding` - // headers set. By default such messages are rejected, but if option is enabled - Envoy will - // remove Content-Length header and process message. - // See `RFC7230, sec. 3.3.3 ` for details. - // - // .. attention:: - // Enabling this option might lead to request smuggling vulnerability, especially if traffic - // is proxied via multiple layers of proxies. - bool allow_chunked_length = 6; - - // Allows invalid HTTP messaging. When this option is false, then Envoy will terminate - // HTTP/1.1 connections upon receiving an invalid HTTP message. However, - // when this option is true, then Envoy will leave the HTTP/1.1 connection - // open where possible. - // If set, this overrides any HCM :ref:`stream_error_on_invalid_http_messaging - // `. 
- google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 7; -} - -message KeepaliveSettings { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.KeepaliveSettings"; - - // Send HTTP/2 PING frames at this period, in order to test that the connection is still alive. - // If this is zero, interval PINGs will not be sent. - google.protobuf.Duration interval = 1 [(validate.rules).duration = {gte {nanos: 1000000}}]; - - // How long to wait for a response to a keepalive PING. If a response is not received within this - // time period, the connection will be aborted. - google.protobuf.Duration timeout = 2 [(validate.rules).duration = { - required: true - gte {nanos: 1000000} - }]; - - // A random jitter amount as a percentage of interval that will be added to each interval. - // A value of zero means there will be no jitter. - // The default value is 15%. - type.v3.Percent interval_jitter = 3; - - // If the connection has been idle for this duration, send a HTTP/2 ping ahead - // of new stream creation, to quickly detect dead connections. - // If this is zero, this type of PING will not be sent. - // If an interval ping is outstanding, a second ping will not be sent as the - // interval ping will determine if the connection is dead. - google.protobuf.Duration connection_idle_interval = 4 - [(validate.rules).duration = {gte {nanos: 1000000}}]; -} - -// [#next-free-field: 16] -message Http2ProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.Http2ProtocolOptions"; - - // Defines a parameter to be sent in the SETTINGS frame. - // See `RFC7540, sec. 6.5.1 `_ for details. - message SettingsParameter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter"; - - // The 16 bit parameter identifier. 
- google.protobuf.UInt32Value identifier = 1 [ - (validate.rules).uint32 = {lte: 65535 gte: 0}, - (validate.rules).message = {required: true} - ]; - - // The 32 bit parameter value. - google.protobuf.UInt32Value value = 2 [(validate.rules).message = {required: true}]; - } - - // `Maximum table size `_ - // (in octets) that the encoder is permitted to use for the dynamic HPACK table. Valid values - // range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. 0 effectively disables header - // compression. - google.protobuf.UInt32Value hpack_table_size = 1; - - // `Maximum concurrent streams `_ - // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1) - // and defaults to 2147483647. - // - // For upstream connections, this also limits how many streams Envoy will initiate concurrently - // on a single connection. If the limit is reached, Envoy may queue requests or establish - // additional connections (as allowed per circuit breaker limits). - // - // This acts as an upper bound: Envoy will lower the max concurrent streams allowed on a given - // connection based on upstream settings. Config dumps will reflect the configured upper bound, - // not the per-connection negotiated limits. - google.protobuf.UInt32Value max_concurrent_streams = 2 - [(validate.rules).uint32 = {lte: 2147483647 gte: 1}]; - - // `Initial stream-level flow-control window - // `_ size. Valid values range from 65535 - // (2^16 - 1, HTTP/2 default) to 2147483647 (2^31 - 1, HTTP/2 maximum) and defaults to 268435456 - // (256 * 1024 * 1024). - // - // NOTE: 65535 is the initial window size from HTTP/2 spec. We only support increasing the default - // window size now, so it's also the minimum. - // - // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the - // HTTP/2 codec buffers. Once the buffer reaches this pointer, watermark callbacks will fire to - // stop the flow of data to the codec buffers. 
- google.protobuf.UInt32Value initial_stream_window_size = 3 - [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}]; - - // Similar to *initial_stream_window_size*, but for connection-level flow-control - // window. Currently, this has the same minimum/maximum/default as *initial_stream_window_size*. - google.protobuf.UInt32Value initial_connection_window_size = 4 - [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}]; - - // Allows proxying Websocket and other upgrades over H2 connect. - bool allow_connect = 5; - - // [#not-implemented-hide:] Hiding until envoy has full metadata support. - // Still under implementation. DO NOT USE. - // - // Allows metadata. See [metadata - // docs](https://github.com/envoyproxy/envoy/blob/main/source/docs/h2_metadata.md) for more - // information. - bool allow_metadata = 6; - - // Limit the number of pending outbound downstream frames of all types (frames that are waiting to - // be written into the socket). Exceeding this limit triggers flood mitigation and connection is - // terminated. The ``http2.outbound_flood`` stat tracks the number of terminated connections due - // to flood mitigation. The default limit is 10000. - // NOTE: flood and abuse mitigation for upstream connections is presently enabled by the - // `envoy.reloadable_features.upstream_http2_flood_checks` flag. - google.protobuf.UInt32Value max_outbound_frames = 7 [(validate.rules).uint32 = {gte: 1}]; - - // Limit the number of pending outbound downstream frames of types PING, SETTINGS and RST_STREAM, - // preventing high memory utilization when receiving continuous stream of these frames. Exceeding - // this limit triggers flood mitigation and connection is terminated. The - // ``http2.outbound_control_flood`` stat tracks the number of terminated connections due to flood - // mitigation. The default limit is 1000. 
- // NOTE: flood and abuse mitigation for upstream connections is presently enabled by the - // `envoy.reloadable_features.upstream_http2_flood_checks` flag. - google.protobuf.UInt32Value max_outbound_control_frames = 8 [(validate.rules).uint32 = {gte: 1}]; - - // Limit the number of consecutive inbound frames of types HEADERS, CONTINUATION and DATA with an - // empty payload and no end stream flag. Those frames have no legitimate use and are abusive, but - // might be a result of a broken HTTP/2 implementation. The `http2.inbound_empty_frames_flood`` - // stat tracks the number of connections terminated due to flood mitigation. - // Setting this to 0 will terminate connection upon receiving first frame with an empty payload - // and no end stream flag. The default limit is 1. - // NOTE: flood and abuse mitigation for upstream connections is presently enabled by the - // `envoy.reloadable_features.upstream_http2_flood_checks` flag. - google.protobuf.UInt32Value max_consecutive_inbound_frames_with_empty_payload = 9; - - // Limit the number of inbound PRIORITY frames allowed per each opened stream. If the number - // of PRIORITY frames received over the lifetime of connection exceeds the value calculated - // using this formula:: - // - // max_inbound_priority_frames_per_stream * (1 + opened_streams) - // - // the connection is terminated. For downstream connections the `opened_streams` is incremented when - // Envoy receives complete response headers from the upstream server. For upstream connection the - // `opened_streams` is incremented when Envoy send the HEADERS frame for a new stream. The - // ``http2.inbound_priority_frames_flood`` stat tracks - // the number of connections terminated due to flood mitigation. The default limit is 100. - // NOTE: flood and abuse mitigation for upstream connections is presently enabled by the - // `envoy.reloadable_features.upstream_http2_flood_checks` flag. 
- google.protobuf.UInt32Value max_inbound_priority_frames_per_stream = 10; - - // Limit the number of inbound WINDOW_UPDATE frames allowed per DATA frame sent. If the number - // of WINDOW_UPDATE frames received over the lifetime of connection exceeds the value calculated - // using this formula:: - // - // 5 + 2 * (opened_streams + - // max_inbound_window_update_frames_per_data_frame_sent * outbound_data_frames) - // - // the connection is terminated. For downstream connections the `opened_streams` is incremented when - // Envoy receives complete response headers from the upstream server. For upstream connections the - // `opened_streams` is incremented when Envoy sends the HEADERS frame for a new stream. The - // ``http2.inbound_priority_frames_flood`` stat tracks the number of connections terminated due to - // flood mitigation. The default max_inbound_window_update_frames_per_data_frame_sent value is 10. - // Setting this to 1 should be enough to support HTTP/2 implementations with basic flow control, - // but more complex implementations that try to estimate available bandwidth require at least 2. - // NOTE: flood and abuse mitigation for upstream connections is presently enabled by the - // `envoy.reloadable_features.upstream_http2_flood_checks` flag. - google.protobuf.UInt32Value max_inbound_window_update_frames_per_data_frame_sent = 11 - [(validate.rules).uint32 = {gte: 1}]; - - // Allows invalid HTTP messaging and headers. When this option is disabled (default), then - // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, - // when this option is enabled, only the offending stream is terminated. - // - // This is overridden by HCM :ref:`stream_error_on_invalid_http_messaging - // ` - // iff present. - // - // This is deprecated in favor of :ref:`override_stream_error_on_invalid_http_message - // ` - // - // See `RFC7540, sec. 8.1 `_ for details. 
- bool hidden_envoy_deprecated_stream_error_on_invalid_http_messaging = 12 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Allows invalid HTTP messaging and headers. When this option is disabled (default), then - // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, - // when this option is enabled, only the offending stream is terminated. - // - // This overrides any HCM :ref:`stream_error_on_invalid_http_messaging - // ` - // - // See `RFC7540, sec. 8.1 `_ for details. - google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 14; - - // [#not-implemented-hide:] - // Specifies SETTINGS frame parameters to be sent to the peer, with two exceptions: - // - // 1. SETTINGS_ENABLE_PUSH (0x2) is not configurable as HTTP/2 server push is not supported by - // Envoy. - // - // 2. SETTINGS_ENABLE_CONNECT_PROTOCOL (0x8) is only configurable through the named field - // 'allow_connect'. - // - // Note that custom parameters specified through this field can not also be set in the - // corresponding named parameters: - // - // .. code-block:: text - // - // ID Field Name - // ---------------- - // 0x1 hpack_table_size - // 0x3 max_concurrent_streams - // 0x4 initial_stream_window_size - // - // Collisions will trigger config validation failure on load/update. Likewise, inconsistencies - // between custom parameters with the same identifier will trigger a failure. - // - // See `IANA HTTP/2 Settings - // `_ for - // standardized identifiers. - repeated SettingsParameter custom_settings_parameters = 13; - - // Send HTTP/2 PING frames to verify that the connection is still healthy. If the remote peer - // does not respond within the configured timeout, the connection will be aborted. 
- KeepaliveSettings connection_keepalive = 15; -} - -// [#not-implemented-hide:] -message GrpcProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.GrpcProtocolOptions"; - - Http2ProtocolOptions http2_protocol_options = 1; -} - -// A message which allows using HTTP/3. -message Http3ProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.Http3ProtocolOptions"; - - QuicProtocolOptions quic_protocol_options = 1; - - // Allows invalid HTTP messaging and headers. When this option is disabled (default), then - // the whole HTTP/3 connection is terminated upon receiving invalid HEADERS frame. However, - // when this option is enabled, only the offending stream is terminated. - // - // If set, this overrides any HCM :ref:`stream_error_on_invalid_http_messaging - // `. - google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 2; -} - -// A message to control transformations to the :scheme header -message SchemeHeaderTransformation { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.SchemeHeaderTransformation"; - - oneof transformation { - // Overwrite any Scheme header with the contents of this string. 
- string scheme_to_overwrite = 1 [(validate.rules).string = {in: "http" in: "https"}]; - } -} diff --git a/generated_api_shadow/envoy/config/core/v4alpha/proxy_protocol.proto b/generated_api_shadow/envoy/config/core/v4alpha/proxy_protocol.proto deleted file mode 100644 index 1650f29d8cab6..0000000000000 --- a/generated_api_shadow/envoy/config/core/v4alpha/proxy_protocol.proto +++ /dev/null @@ -1,29 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v4alpha; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; -option java_outer_classname = "ProxyProtocolProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Proxy Protocol] - -message ProxyProtocolConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.ProxyProtocolConfig"; - - enum Version { - // PROXY protocol version 1. Human readable format. - V1 = 0; - - // PROXY protocol version 2. Binary format. - V2 = 1; - } - - // The PROXY protocol version to use. 
See https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt for details - Version version = 1; -} diff --git a/generated_api_shadow/envoy/config/core/v4alpha/resolver.proto b/generated_api_shadow/envoy/config/core/v4alpha/resolver.proto deleted file mode 100644 index 4849a54161ced..0000000000000 --- a/generated_api_shadow/envoy/config/core/v4alpha/resolver.proto +++ /dev/null @@ -1,48 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v4alpha; - -import "envoy/config/core/v4alpha/address.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; -option java_outer_classname = "ResolverProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Resolver] - -// Configuration of DNS resolver option flags which control the behavior of the DNS resolver. -message DnsResolverOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.DnsResolverOptions"; - - // Use TCP for all DNS queries instead of the default protocol UDP. - // Setting this value causes failure if the - // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during - // server startup. Apple's API only uses UDP for DNS resolution. - bool use_tcp_for_dns_lookups = 1; - - // Do not use the default search domains; only query hostnames as-is or as aliases. - bool no_default_search_domain = 2; -} - -// DNS resolution configuration which includes the underlying dns resolver addresses and options. -message DnsResolutionConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.DnsResolutionConfig"; - - // A list of dns resolver addresses. If specified, the DNS client library will perform resolution - // via the underlying DNS resolvers. 
Otherwise, the default system resolvers - // (e.g., /etc/resolv.conf) will be used. - // Setting this value causes failure if the - // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during - // server startup. Apple's API only allows overriding DNS resolvers via system settings. - repeated Address resolvers = 1 [(validate.rules).repeated = {min_items: 1}]; - - // Configuration of DNS resolver option flags which control the behavior of the DNS resolver. - DnsResolverOptions dns_resolver_options = 2; -} diff --git a/generated_api_shadow/envoy/config/core/v4alpha/socket_option.proto b/generated_api_shadow/envoy/config/core/v4alpha/socket_option.proto deleted file mode 100644 index 7dac394a865dc..0000000000000 --- a/generated_api_shadow/envoy/config/core/v4alpha/socket_option.proto +++ /dev/null @@ -1,56 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v4alpha; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; -option java_outer_classname = "SocketOptionProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Socket Option ] - -// Generic socket option message. This would be used to set socket options that -// might not exist in upstream kernels or precompiled Envoy binaries. 
-// [#next-free-field: 7] -message SocketOption { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.SocketOption"; - - enum SocketState { - // Socket options are applied after socket creation but before binding the socket to a port - STATE_PREBIND = 0; - - // Socket options are applied after binding the socket to a port but before calling listen() - STATE_BOUND = 1; - - // Socket options are applied after calling listen() - STATE_LISTENING = 2; - } - - // An optional name to give this socket option for debugging, etc. - // Uniqueness is not required and no special meaning is assumed. - string description = 1; - - // Corresponding to the level value passed to setsockopt, such as IPPROTO_TCP - int64 level = 2; - - // The numeric name as passed to setsockopt - int64 name = 3; - - oneof value { - option (validate.required) = true; - - // Because many sockopts take an int value. - int64 int_value = 4; - - // Otherwise it's a byte buffer. - bytes buf_value = 5; - } - - // The state in which the option will be applied. When used in BindConfig - // STATE_PREBIND is currently the only valid value. 
- SocketState state = 6 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/generated_api_shadow/envoy/config/core/v4alpha/substitution_format_string.proto b/generated_api_shadow/envoy/config/core/v4alpha/substitution_format_string.proto deleted file mode 100644 index 8bb1a9e53e561..0000000000000 --- a/generated_api_shadow/envoy/config/core/v4alpha/substitution_format_string.proto +++ /dev/null @@ -1,118 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/extension.proto"; - -import "google/protobuf/struct.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; -option java_outer_classname = "SubstitutionFormatStringProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Substitution format string] - -// Configuration to use multiple :ref:`command operators ` -// to generate a new string in either plain text or JSON format. -// [#next-free-field: 7] -message SubstitutionFormatString { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.SubstitutionFormatString"; - - oneof format { - option (validate.required) = true; - - // Specify a format with command operators to form a text string. - // Its details is described in :ref:`format string`. - // - // For example, setting ``text_format`` like below, - // - // .. validated-code-block:: yaml - // :type-name: envoy.config.core.v3.SubstitutionFormatString - // - // text_format: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" - // - // generates plain text similar to: - // - // .. 
code-block:: text - // - // upstream connect error:503:path=/foo - // - // Deprecated in favor of :ref:`text_format_source `. To migrate text format strings, use the :ref:`inline_string ` field. - string hidden_envoy_deprecated_text_format = 1 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Specify a format with command operators to form a JSON string. - // Its details is described in :ref:`format dictionary`. - // Values are rendered as strings, numbers, or boolean values as appropriate. - // Nested JSON objects may be produced by some command operators (e.g. FILTER_STATE or DYNAMIC_METADATA). - // See the documentation for a specific command operator for details. - // - // .. validated-code-block:: yaml - // :type-name: envoy.config.core.v3.SubstitutionFormatString - // - // json_format: - // status: "%RESPONSE_CODE%" - // message: "%LOCAL_REPLY_BODY%" - // - // The following JSON object would be created: - // - // .. code-block:: json - // - // { - // "status": 500, - // "message": "My error message" - // } - // - google.protobuf.Struct json_format = 2 [(validate.rules).message = {required: true}]; - - // Specify a format with command operators to form a text string. - // Its details is described in :ref:`format string`. - // - // For example, setting ``text_format`` like below, - // - // .. validated-code-block:: yaml - // :type-name: envoy.config.core.v3.SubstitutionFormatString - // - // text_format_source: - // inline_string: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" - // - // generates plain text similar to: - // - // .. code-block:: text - // - // upstream connect error:503:path=/foo - // - DataSource text_format_source = 5; - } - - // If set to true, when command operators are evaluated to null, - // - // * for ``text_format``, the output of the empty operator is changed from ``-`` to an - // empty string, so that empty values are omitted entirely. 
- // * for ``json_format`` the keys with null values are omitted in the output structure. - bool omit_empty_values = 3; - - // Specify a *content_type* field. - // If this field is not set then ``text/plain`` is used for *text_format* and - // ``application/json`` is used for *json_format*. - // - // .. validated-code-block:: yaml - // :type-name: envoy.config.core.v3.SubstitutionFormatString - // - // content_type: "text/html; charset=UTF-8" - // - string content_type = 4; - - // Specifies a collection of Formatter plugins that can be called from the access log configuration. - // See the formatters extensions documentation for details. - // [#extension-category: envoy.formatter] - repeated TypedExtensionConfig formatters = 6; -} diff --git a/generated_api_shadow/envoy/config/core/v4alpha/udp_socket_config.proto b/generated_api_shadow/envoy/config/core/v4alpha/udp_socket_config.proto deleted file mode 100644 index 5fa6c6ec52dd1..0000000000000 --- a/generated_api_shadow/envoy/config/core/v4alpha/udp_socket_config.proto +++ /dev/null @@ -1,35 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v4alpha; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; -option java_outer_classname = "UdpSocketConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: UDP socket config] - -// Generic UDP socket configuration. -message UdpSocketConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.core.v3.UdpSocketConfig"; - - // The maximum size of received UDP datagrams. Using a larger size will cause Envoy to allocate - // more memory per socket. Received datagrams above this size will be dropped. If not set - // defaults to 1500 bytes. 
- google.protobuf.UInt64Value max_rx_datagram_size = 1 - [(validate.rules).uint64 = {lt: 65536 gt: 0}]; - - // Configures whether Generic Receive Offload (GRO) - // _ is preferred when reading from the - // UDP socket. The default is context dependent and is documented where UdpSocketConfig is used. - // This option affects performance but not functionality. If GRO is not supported by the operating - // system, non-GRO receive will be used. - google.protobuf.BoolValue prefer_gro = 2; -} diff --git a/generated_api_shadow/envoy/config/endpoint/v3/endpoint_components.proto b/generated_api_shadow/envoy/config/endpoint/v3/endpoint_components.proto index 0e10ac3b2fca7..0a9aac105e72d 100644 --- a/generated_api_shadow/envoy/config/endpoint/v3/endpoint_components.proto +++ b/generated_api_shadow/envoy/config/endpoint/v3/endpoint_components.proto @@ -4,10 +4,12 @@ package envoy.config.endpoint.v3; import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/health_check.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -108,21 +110,51 @@ message LbEndpoint { google.protobuf.UInt32Value load_balancing_weight = 4 [(validate.rules).uint32 = {gte: 1}]; } +// [#not-implemented-hide:] +// A configuration for a LEDS collection. +message LedsClusterLocalityConfig { + // Configuration for the source of LEDS updates for a Locality. + core.v3.ConfigSource leds_config = 1; + + // The xDS transport protocol glob collection resource name. + // The service is only supported in delta xDS (incremental) mode. + string leds_collection_name = 2; +} + // A group of endpoints belonging to a Locality. 
// One can have multiple LocalityLbEndpoints for a locality, but this is // generally only done if the different groups need to have different load // balancing weights or different priorities. -// [#next-free-field: 7] +// [#next-free-field: 9] message LocalityLbEndpoints { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.endpoint.LocalityLbEndpoints"; + // [#not-implemented-hide:] + // A list of endpoints of a specific locality. + message LbEndpointList { + repeated LbEndpoint lb_endpoints = 1; + } + // Identifies location of where the upstream hosts run. core.v3.Locality locality = 1; // The group of endpoints belonging to the locality specified. + // [#comment:TODO(adisuissa): Once LEDS is implemented this field needs to be + // deprecated and replaced by *load_balancer_endpoints*.] repeated LbEndpoint lb_endpoints = 2; + // [#not-implemented-hide:] + oneof lb_config { + // The group of endpoints belonging to the locality. + // [#comment:TODO(adisuissa): Once LEDS is implemented the *lb_endpoints* field + // needs to be deprecated.] + LbEndpointList load_balancer_endpoints = 7; + + // LEDS Configuration for the current locality. + LedsClusterLocalityConfig leds_cluster_locality_config = 8; + } + // Optional: Per priority/region/zone/sub_zone weight; at least 1. The load // balancing weight for a locality is divided by the sum of the weights of all // localities at the same priority level to produce the effective percentage diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/BUILD b/generated_api_shadow/envoy/config/listener/v4alpha/BUILD deleted file mode 100644 index 6b67fe7e4cdd1..0000000000000 --- a/generated_api_shadow/envoy/config/listener/v4alpha/BUILD +++ /dev/null @@ -1,17 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/accesslog/v4alpha:pkg", - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/listener/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@com_github_cncf_udpa//xds/core/v3:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/api_listener.proto b/generated_api_shadow/envoy/config/listener/v4alpha/api_listener.proto deleted file mode 100644 index 518caf879ad5e..0000000000000 --- a/generated_api_shadow/envoy/config/listener/v4alpha/api_listener.proto +++ /dev/null @@ -1,33 +0,0 @@ -syntax = "proto3"; - -package envoy.config.listener.v4alpha; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; -option java_outer_classname = "ApiListenerProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: API listener] - -// Describes a type of API listener, which is used in non-proxy clients. The type of API -// exposed to the non-proxy application depends on the type of API listener. -message ApiListener { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.listener.v3.ApiListener"; - - // The type in this field determines the type of API listener. At present, the following - // types are supported: - // envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager (HTTP) - // envoy.extensions.filters.network.http_connection_manager.v3.EnvoyMobileHttpConnectionManager (HTTP) - // [#next-major-version: In the v3 API, replace this Any field with a oneof containing the - // specific config message for each type of API listener. 
We could not do this in v2 because - // it would have caused circular dependencies for go protos: lds.proto depends on this file, - // and http_connection_manager.proto depends on rds.proto, which is in the same directory as - // lds.proto, so lds.proto cannot depend on this file.] - google.protobuf.Any api_listener = 1; -} diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/listener.proto b/generated_api_shadow/envoy/config/listener/v4alpha/listener.proto deleted file mode 100644 index ccd900b6f4d34..0000000000000 --- a/generated_api_shadow/envoy/config/listener/v4alpha/listener.proto +++ /dev/null @@ -1,324 +0,0 @@ -syntax = "proto3"; - -package envoy.config.listener.v4alpha; - -import "envoy/config/accesslog/v4alpha/accesslog.proto"; -import "envoy/config/core/v4alpha/address.proto"; -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/socket_option.proto"; -import "envoy/config/listener/v4alpha/api_listener.proto"; -import "envoy/config/listener/v4alpha/listener_components.proto"; -import "envoy/config/listener/v4alpha/udp_listener_config.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "xds/core/v3/collection_entry.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/security.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; -option java_outer_classname = "ListenerProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Listener configuration] -// Listener :ref:`configuration overview ` - -// Listener list collections. Entries are *Listener* resources or references. 
-// [#not-implemented-hide:] -message ListenerCollection { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.listener.v3.ListenerCollection"; - - repeated xds.core.v3.CollectionEntry entries = 1; -} - -// [#next-free-field: 30] -message Listener { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.Listener"; - - enum DrainType { - // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check - // filter), listener removal/modification, and hot restart. - DEFAULT = 0; - - // Drain in response to listener removal/modification and hot restart. This setting does not - // include /healthcheck/fail. This setting may be desirable if Envoy is hosting both ingress - // and egress listeners. - MODIFY_ONLY = 1; - } - - // [#not-implemented-hide:] - message DeprecatedV1 { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.listener.v3.Listener.DeprecatedV1"; - - // Whether the listener should bind to the port. A listener that doesn't - // bind can only receive connections redirected from other listeners that - // set use_original_dst parameter to true. Default is true. - // - // This is deprecated. Use :ref:`Listener.bind_to_port - // ` - google.protobuf.BoolValue bind_to_port = 1; - } - - // Configuration for listener connection balancing. - message ConnectionBalanceConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.listener.v3.Listener.ConnectionBalanceConfig"; - - // A connection balancer implementation that does exact balancing. This means that a lock is - // held during balancing so that connection counts are nearly exactly balanced between worker - // threads. This is "nearly" exact in the sense that a connection might close in parallel thus - // making the counts incorrect, but this should be rectified on the next accept. 
This balancer - // sacrifices accept throughput for accuracy and should be used when there are a small number of - // connections that rarely cycle (e.g., service mesh gRPC egress). - message ExactBalance { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.listener.v3.Listener.ConnectionBalanceConfig.ExactBalance"; - } - - oneof balance_type { - option (validate.required) = true; - - // If specified, the listener will use the exact connection balancer. - ExactBalance exact_balance = 1; - } - } - - // Configuration for envoy internal listener. All the future internal listener features should be added here. - // [#not-implemented-hide:] - message InternalListenerConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.listener.v3.Listener.InternalListenerConfig"; - } - - reserved 14, 23; - - // The unique name by which this listener is known. If no name is provided, - // Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically - // updated or removed via :ref:`LDS ` a unique name must be provided. - string name = 1; - - // The address that the listener should listen on. In general, the address must be unique, though - // that is governed by the bind rules of the OS. E.g., multiple listeners can listen on port 0 on - // Linux as the actual port will be allocated by the OS. - core.v4alpha.Address address = 2 [(validate.rules).message = {required: true}]; - - // Optional prefix to use on listener stats. If empty, the stats will be rooted at - // `listener.
.`. If non-empty, stats will be rooted at - // `listener..`. - string stat_prefix = 28; - - // A list of filter chains to consider for this listener. The - // :ref:`FilterChain ` with the most specific - // :ref:`FilterChainMatch ` criteria is used on a - // connection. - // - // Example using SNI for filter chain selection can be found in the - // :ref:`FAQ entry `. - repeated FilterChain filter_chains = 3; - - // If a connection is redirected using *iptables*, the port on which the proxy - // receives it might be different from the original destination address. When this flag is set to - // true, the listener hands off redirected connections to the listener associated with the - // original destination address. If there is no listener associated with the original destination - // address, the connection is handled by the listener that receives it. Defaults to false. - google.protobuf.BoolValue use_original_dst = 4; - - // The default filter chain if none of the filter chain matches. If no default filter chain is supplied, - // the connection will be closed. The filter chain match is ignored in this field. - FilterChain default_filter_chain = 25; - - // Soft limit on size of the listener’s new connection read and write buffers. - // If unspecified, an implementation defined default is applied (1MiB). - google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5 - [(udpa.annotations.security).configure_for_untrusted_downstream = true]; - - // Listener metadata. - core.v4alpha.Metadata metadata = 6; - - // [#not-implemented-hide:] - DeprecatedV1 hidden_envoy_deprecated_deprecated_v1 = 7 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // The type of draining to perform at a listener-wide level. - DrainType drain_type = 8; - - // Listener filters have the opportunity to manipulate and augment the connection metadata that - // is used in connection filter chain matching, for example. 
These filters are run before any in - // :ref:`filter_chains `. Order matters as the - // filters are processed sequentially right after a socket has been accepted by the listener, and - // before a connection is created. - // UDP Listener filters can be specified when the protocol in the listener socket address in - // :ref:`protocol ` is :ref:`UDP - // `. - // UDP listeners currently support a single filter. - repeated ListenerFilter listener_filters = 9; - - // The timeout to wait for all listener filters to complete operation. If the timeout is reached, - // the accepted socket is closed without a connection being created unless - // `continue_on_listener_filters_timeout` is set to true. Specify 0 to disable the - // timeout. If not specified, a default timeout of 15s is used. - google.protobuf.Duration listener_filters_timeout = 15; - - // Whether a connection should be created when listener filters timeout. Default is false. - // - // .. attention:: - // - // Some listener filters, such as :ref:`Proxy Protocol filter - // `, should not be used with this option. It will cause - // unexpected behavior when a connection is created. - bool continue_on_listener_filters_timeout = 17; - - // Whether the listener should be set as a transparent socket. - // When this flag is set to true, connections can be redirected to the listener using an - // *iptables* *TPROXY* target, in which case the original source and destination addresses and - // ports are preserved on accepted connections. This flag should be used in combination with - // :ref:`an original_dst ` :ref:`listener filter - // ` to mark the connections' local addresses as - // "restored." This can be used to hand off each redirected connection to another listener - // associated with the connection's destination address. Direct connections to the socket without - // using *TPROXY* cannot be distinguished from connections redirected using *TPROXY* and are - // therefore treated as if they were redirected. 
- // When this flag is set to false, the listener's socket is explicitly reset as non-transparent. - // Setting this flag requires Envoy to run with the *CAP_NET_ADMIN* capability. - // When this flag is not set (default), the socket is not modified, i.e. the transparent option - // is neither set nor reset. - google.protobuf.BoolValue transparent = 10; - - // Whether the listener should set the *IP_FREEBIND* socket option. When this - // flag is set to true, listeners can be bound to an IP address that is not - // configured on the system running Envoy. When this flag is set to false, the - // option *IP_FREEBIND* is disabled on the socket. When this flag is not set - // (default), the socket is not modified, i.e. the option is neither enabled - // nor disabled. - google.protobuf.BoolValue freebind = 11; - - // Additional socket options that may not be present in Envoy source code or - // precompiled binaries. - repeated core.v4alpha.SocketOption socket_options = 13; - - // Whether the listener should accept TCP Fast Open (TFO) connections. - // When this flag is set to a value greater than 0, the option TCP_FASTOPEN is enabled on - // the socket, with a queue length of the specified size - // (see `details in RFC7413 `_). - // When this flag is set to 0, the option TCP_FASTOPEN is disabled on the socket. - // When this flag is not set (default), the socket is not modified, - // i.e. the option is neither enabled nor disabled. - // - // On Linux, the net.ipv4.tcp_fastopen kernel parameter must include flag 0x2 to enable - // TCP_FASTOPEN. - // See `ip-sysctl.txt `_. - // - // On macOS, only values of 0, 1, and unset are valid; other values may result in an error. - // To set the queue length on macOS, set the net.inet.tcp.fastopen_backlog kernel parameter. - google.protobuf.UInt32Value tcp_fast_open_queue_length = 12; - - // Specifies the intended direction of the traffic relative to the local Envoy. 
- // This property is required on Windows for listeners using the original destination filter, - // see :ref:`Original Destination `. - core.v4alpha.TrafficDirection traffic_direction = 16; - - // If the protocol in the listener socket address in :ref:`protocol - // ` is :ref:`UDP - // `, this field specifies UDP - // listener specific configuration. - UdpListenerConfig udp_listener_config = 18; - - // Used to represent an API listener, which is used in non-proxy clients. The type of API - // exposed to the non-proxy application depends on the type of API listener. - // When this field is set, no other field except for :ref:`name` - // should be set. - // - // .. note:: - // - // Currently only one ApiListener can be installed; and it can only be done via bootstrap config, - // not LDS. - // - // [#next-major-version: In the v3 API, instead of this messy approach where the socket - // listener fields are directly in the top-level Listener message and the API listener types - // are in the ApiListener message, the socket listener messages should be in their own message, - // and the top-level Listener should essentially be a oneof that selects between the - // socket listener and the various types of API listener. That way, a given Listener message - // can structurally only contain the fields of the relevant type.] - ApiListener api_listener = 19; - - // The listener's connection balancer configuration, currently only applicable to TCP listeners. - // If no configuration is specified, Envoy will not attempt to balance active connections between - // worker threads. - // - // In the scenario that the listener X redirects all the connections to the listeners Y1 and Y2 - // by setting :ref:`use_original_dst ` in X - // and :ref:`bind_to_port ` to false in Y1 and Y2, - // it is recommended to disable the balance config in listener X to avoid the cost of balancing, and - // enable the balance config in Y1 and Y2 to balance the connections among the workers. 
- ConnectionBalanceConfig connection_balance_config = 20; - - // Deprecated. Use `enable_reuse_port` instead. - bool hidden_envoy_deprecated_reuse_port = 21 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // When this flag is set to true, listeners set the *SO_REUSEPORT* socket option and - // create one socket for each worker thread. This makes inbound connections - // distribute among worker threads roughly evenly in cases where there are a high number - // of connections. When this flag is set to false, all worker threads share one socket. This field - // defaults to true. - // - // .. attention:: - // - // Although this field defaults to true, it has different behavior on different platforms. See - // the following text for more information. - // - // * On Linux, reuse_port is respected for both TCP and UDP listeners. It also works correctly - // with hot restart. - // * On macOS, reuse_port for TCP does not do what it does on Linux. Instead of load balancing, - // the last socket wins and receives all connections/packets. For TCP, reuse_port is force - // disabled and the user is warned. For UDP, it is enabled, but only one worker will receive - // packets. For QUIC/H3, SW routing will send packets to other workers. For "raw" UDP, only - // a single worker will currently receive packets. - // * On Windows, reuse_port for TCP has undefined behavior. It is force disabled and the user - // is warned similar to macOS. It is left enabled for UDP with undefined behavior currently. - google.protobuf.BoolValue enable_reuse_port = 29; - - // Configuration for :ref:`access logs ` - // emitted by this listener. - repeated accesslog.v4alpha.AccessLog access_log = 22; - - // The maximum length a tcp listener's pending connections queue can grow to. If no value is - // provided net.core.somaxconn will be used on Linux and 128 otherwise. 
- google.protobuf.UInt32Value tcp_backlog_size = 24; - - // Whether the listener should bind to the port. A listener that doesn't - // bind can only receive connections redirected from other listeners that set - // :ref:`use_original_dst ` - // to true. Default is true. - google.protobuf.BoolValue bind_to_port = 26; - - // The exclusive listener type and the corresponding config. - // TODO(lambdai): https://github.com/envoyproxy/envoy/issues/15372 - // Will create and add TcpListenerConfig. Will add UdpListenerConfig and ApiListener. - // [#not-implemented-hide:] - oneof listener_specifier { - // Used to represent an internal listener which does not listen on OSI L4 address but can be used by the - // :ref:`envoy cluster ` to create a user space connection to. - // The internal listener acts as a tcp listener. It supports listener filters and network filter chains. - // The internal listener require :ref:`address ` has - // field `envoy_internal_address`. - // - // There are some limitations are derived from the implementation. The known limitations include - // - // * :ref:`ConnectionBalanceConfig ` is not - // allowed because both cluster connection and listener connection must be owned by the same dispatcher. 
- // * :ref:`tcp_backlog_size ` - // * :ref:`freebind ` - // * :ref:`transparent ` - // [#not-implemented-hide:] - InternalListenerConfig internal_listener = 27; - } -} diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/listener_components.proto b/generated_api_shadow/envoy/config/listener/v4alpha/listener_components.proto deleted file mode 100644 index 48e068e4ae59f..0000000000000 --- a/generated_api_shadow/envoy/config/listener/v4alpha/listener_components.proto +++ /dev/null @@ -1,363 +0,0 @@ -syntax = "proto3"; - -package envoy.config.listener.v4alpha; - -import "envoy/config/core/v4alpha/address.proto"; -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/extension.proto"; -import "envoy/type/v3/range.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; -option java_outer_classname = "ListenerComponentsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Listener components] -// Listener :ref:`configuration overview ` - -// [#next-free-field: 6] -message Filter { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.Filter"; - - reserved 3, 2; - - reserved "config"; - - // The name of the filter to instantiate. The name must match a - // :ref:`supported filter `. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - oneof config_type { - // Filter specific configuration which depends on the filter being - // instantiated. See the supported filters for further documentation. 
- // [#extension-category: envoy.filters.network] - google.protobuf.Any typed_config = 4; - - // Configuration source specifier for an extension configuration discovery - // service. In case of a failure and without the default configuration, the - // listener closes the connections. - // [#not-implemented-hide:] - core.v4alpha.ExtensionConfigSource config_discovery = 5; - } -} - -// Specifies the match criteria for selecting a specific filter chain for a -// listener. -// -// In order for a filter chain to be selected, *ALL* of its criteria must be -// fulfilled by the incoming connection, properties of which are set by the -// networking stack and/or listener filters. -// -// The following order applies: -// -// 1. Destination port. -// 2. Destination IP address. -// 3. Server name (e.g. SNI for TLS protocol), -// 4. Transport protocol. -// 5. Application protocols (e.g. ALPN for TLS protocol). -// 6. Directly connected source IP address (this will only be different from the source IP address -// when using a listener filter that overrides the source address, such as the :ref:`Proxy Protocol -// listener filter `). -// 7. Source type (e.g. any, local or external network). -// 8. Source IP address. -// 9. Source port. -// -// For criteria that allow ranges or wildcards, the most specific value in any -// of the configured filter chains that matches the incoming connection is going -// to be used (e.g. for SNI ``www.example.com`` the most specific match would be -// ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter -// chain without ``server_names`` requirements). -// -// A different way to reason about the filter chain matches: -// Suppose there exists N filter chains. Prune the filter chain set using the above 8 steps. -// In each step, filter chains which most specifically matches the attributes continue to the next step. -// The listener guarantees at most 1 filter chain is left after all of the steps. 
-// -// Example: -// -// For destination port, filter chains specifying the destination port of incoming traffic are the -// most specific match. If none of the filter chains specifies the exact destination port, the filter -// chains which do not specify ports are the most specific match. Filter chains specifying the -// wrong port can never be the most specific match. -// -// [#comment: Implemented rules are kept in the preference order, with deprecated fields -// listed at the end, because that's how we want to list them in the docs. -// -// [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules] -// [#next-free-field: 14] -message FilterChainMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.listener.v3.FilterChainMatch"; - - enum ConnectionSourceType { - // Any connection source matches. - ANY = 0; - - // Match a connection originating from the same host. - SAME_IP_OR_LOOPBACK = 1; - - // Match a connection originating from a different host. - EXTERNAL = 2; - } - - reserved 1; - - // Optional destination port to consider when use_original_dst is set on the - // listener in determining a filter chain match. - google.protobuf.UInt32Value destination_port = 8 [(validate.rules).uint32 = {lte: 65535 gte: 1}]; - - // If non-empty, an IP address and prefix length to match addresses when the - // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. - repeated core.v4alpha.CidrRange prefix_ranges = 3; - - // If non-empty, an IP address and suffix length to match addresses when the - // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. - // [#not-implemented-hide:] - string address_suffix = 4; - - // [#not-implemented-hide:] - google.protobuf.UInt32Value suffix_len = 5; - - // The criteria is satisfied if the directly connected source IP address of the downstream - // connection is contained in at least one of the specified subnets. 
If the parameter is not - // specified or the list is empty, the directly connected source IP address is ignored. - repeated core.v4alpha.CidrRange direct_source_prefix_ranges = 13; - - // Specifies the connection source IP match type. Can be any, local or external network. - ConnectionSourceType source_type = 12 [(validate.rules).enum = {defined_only: true}]; - - // The criteria is satisfied if the source IP address of the downstream - // connection is contained in at least one of the specified subnets. If the - // parameter is not specified or the list is empty, the source IP address is - // ignored. - repeated core.v4alpha.CidrRange source_prefix_ranges = 6; - - // The criteria is satisfied if the source port of the downstream connection - // is contained in at least one of the specified ports. If the parameter is - // not specified, the source port is ignored. - repeated uint32 source_ports = 7 - [(validate.rules).repeated = {items {uint32 {lte: 65535 gte: 1}}}]; - - // If non-empty, a list of server names (e.g. SNI for TLS protocol) to consider when determining - // a filter chain match. Those values will be compared against the server names of a new - // connection, when detected by one of the listener filters. - // - // The server name will be matched against all wildcard domains, i.e. ``www.example.com`` - // will be first matched against ``www.example.com``, then ``*.example.com``, then ``*.com``. - // - // Note that partial wildcards are not supported, and values like ``*w.example.com`` are invalid. - // - // .. attention:: - // - // See the :ref:`FAQ entry ` on how to configure SNI for more - // information. - repeated string server_names = 11; - - // If non-empty, a transport protocol to consider when determining a filter chain match. - // This value will be compared against the transport protocol of a new connection, when - // it's detected by one of the listener filters. 
- // - // Suggested values include: - // - // * ``raw_buffer`` - default, used when no transport protocol is detected, - // * ``tls`` - set by :ref:`envoy.filters.listener.tls_inspector ` - // when TLS protocol is detected. - string transport_protocol = 9; - - // If non-empty, a list of application protocols (e.g. ALPN for TLS protocol) to consider when - // determining a filter chain match. Those values will be compared against the application - // protocols of a new connection, when detected by one of the listener filters. - // - // Suggested values include: - // - // * ``http/1.1`` - set by :ref:`envoy.filters.listener.tls_inspector - // `, - // * ``h2`` - set by :ref:`envoy.filters.listener.tls_inspector ` - // - // .. attention:: - // - // Currently, only :ref:`TLS Inspector ` provides - // application protocol detection based on the requested - // `ALPN `_ values. - // - // However, the use of ALPN is pretty much limited to the HTTP/2 traffic on the Internet, - // and matching on values other than ``h2`` is going to lead to a lot of false negatives, - // unless all connecting clients are known to use ALPN. - repeated string application_protocols = 10; -} - -// A filter chain wraps a set of match criteria, an option TLS context, a set of filters, and -// various other parameters. -// [#next-free-field: 10] -message FilterChain { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.listener.v3.FilterChain"; - - // The configuration for on-demand filter chain. If this field is not empty in FilterChain message, - // a filter chain will be built on-demand. - // On-demand filter chains help speedup the warming up of listeners since the building and initialization of - // an on-demand filter chain will be postponed to the arrival of new connection requests that require this filter chain. - // Filter chains that are not often used can be set as on-demand. 
- message OnDemandConfiguration { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.listener.v3.FilterChain.OnDemandConfiguration"; - - // The timeout to wait for filter chain placeholders to complete rebuilding. - // 1. If this field is set to 0, timeout is disabled. - // 2. If not specified, a default timeout of 15s is used. - // Rebuilding will wait until dependencies are ready, have failed, or this timeout is reached. - // Upon failure or timeout, all connections related to this filter chain will be closed. - // Rebuilding will start again on the next new connection. - google.protobuf.Duration rebuild_timeout = 1; - } - - reserved 2; - - reserved "tls_context"; - - // The criteria to use when matching a connection to this filter chain. - FilterChainMatch filter_chain_match = 1; - - // A list of individual network filters that make up the filter chain for - // connections established with the listener. Order matters as the filters are - // processed sequentially as connection events happen. Note: If the filter - // list is empty, the connection will close by default. - repeated Filter filters = 3; - - // Whether the listener should expect a PROXY protocol V1 header on new - // connections. If this option is enabled, the listener will assume that that - // remote address of the connection is the one specified in the header. Some - // load balancers including the AWS ELB support this option. If the option is - // absent or set to false, Envoy will use the physical peer address of the - // connection as the remote address. - // - // This field is deprecated. Add a - // :ref:`PROXY protocol listener filter ` - // explicitly instead. - google.protobuf.BoolValue hidden_envoy_deprecated_use_proxy_proto = 4 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // [#not-implemented-hide:] filter chain metadata. 
- core.v4alpha.Metadata metadata = 5; - - // Optional custom transport socket implementation to use for downstream connections. - // To setup TLS, set a transport socket with name `envoy.transport_sockets.tls` and - // :ref:`DownstreamTlsContext ` in the `typed_config`. - // If no transport socket configuration is specified, new connections - // will be set up with plaintext. - // [#extension-category: envoy.transport_sockets.downstream] - core.v4alpha.TransportSocket transport_socket = 6; - - // If present and nonzero, the amount of time to allow incoming connections to complete any - // transport socket negotiations. If this expires before the transport reports connection - // establishment, the connection is summarily closed. - google.protobuf.Duration transport_socket_connect_timeout = 9; - - // [#not-implemented-hide:] The unique name (or empty) by which this filter chain is known. If no - // name is provided, Envoy will allocate an internal UUID for the filter chain. If the filter - // chain is to be dynamically updated or removed via FCDS a unique name must be provided. - string name = 7; - - // [#not-implemented-hide:] The configuration to specify whether the filter chain will be built on-demand. - // If this field is not empty, the filter chain will be built on-demand. - // Otherwise, the filter chain will be built normally and block listener warming. - OnDemandConfiguration on_demand_configuration = 8; -} - -// Listener filter chain match configuration. This is a recursive structure which allows complex -// nested match configurations to be built using various logical operators. -// -// Examples: -// -// * Matches if the destination port is 3306. -// -// .. code-block:: yaml -// -// destination_port_range: -// start: 3306 -// end: 3307 -// -// * Matches if the destination port is 3306 or 15000. -// -// .. 
code-block:: yaml -// -// or_match: -// rules: -// - destination_port_range: -// start: 3306 -// end: 3307 -// - destination_port_range: -// start: 15000 -// end: 15001 -// -// [#next-free-field: 6] -message ListenerFilterChainMatchPredicate { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.listener.v3.ListenerFilterChainMatchPredicate"; - - // A set of match configurations used for logical operations. - message MatchSet { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.listener.v3.ListenerFilterChainMatchPredicate.MatchSet"; - - // The list of rules that make up the set. - repeated ListenerFilterChainMatchPredicate rules = 1 - [(validate.rules).repeated = {min_items: 2}]; - } - - oneof rule { - option (validate.required) = true; - - // A set that describes a logical OR. If any member of the set matches, the match configuration - // matches. - MatchSet or_match = 1; - - // A set that describes a logical AND. If all members of the set match, the match configuration - // matches. - MatchSet and_match = 2; - - // A negation match. The match configuration will match if the negated match condition matches. - ListenerFilterChainMatchPredicate not_match = 3; - - // The match configuration will always match. - bool any_match = 4 [(validate.rules).bool = {const: true}]; - - // Match destination port. Particularly, the match evaluation must use the recovered local port if - // the owning listener filter is after :ref:`an original_dst listener filter `. - type.v3.Int32Range destination_port_range = 5; - } -} - -message ListenerFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.listener.v3.ListenerFilter"; - - reserved 2; - - reserved "config"; - - // The name of the filter to instantiate. The name must match a - // :ref:`supported filter `. 
- string name = 1 [(validate.rules).string = {min_len: 1}]; - - oneof config_type { - // Filter specific configuration which depends on the filter being - // instantiated. See the supported filters for further documentation. - // [#extension-category: envoy.filters.listener,envoy.filters.udp_listener] - google.protobuf.Any typed_config = 3; - } - - // Optional match predicate used to disable the filter. The filter is enabled when this field is empty. - // See :ref:`ListenerFilterChainMatchPredicate ` - // for further examples. - ListenerFilterChainMatchPredicate filter_disabled = 4; -} diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/quic_config.proto b/generated_api_shadow/envoy/config/listener/v4alpha/quic_config.proto deleted file mode 100644 index 0b6d6bd7584ce..0000000000000 --- a/generated_api_shadow/envoy/config/listener/v4alpha/quic_config.proto +++ /dev/null @@ -1,62 +0,0 @@ -syntax = "proto3"; - -package envoy.config.listener.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/extension.proto"; -import "envoy/config/core/v4alpha/protocol.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; -option java_outer_classname = "QuicConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: QUIC listener config] - -// Configuration specific to the UDP QUIC listener. 
-// [#next-free-field: 8] -message QuicProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.listener.v3.QuicProtocolOptions"; - - core.v4alpha.QuicProtocolOptions quic_protocol_options = 1; - - // Maximum number of milliseconds that connection will be alive when there is - // no network activity. 300000ms if not specified. - google.protobuf.Duration idle_timeout = 2; - - // Connection timeout in milliseconds before the crypto handshake is finished. - // 20000ms if not specified. - google.protobuf.Duration crypto_handshake_timeout = 3; - - // Runtime flag that controls whether the listener is enabled or not. If not specified, defaults - // to enabled. - core.v4alpha.RuntimeFeatureFlag enabled = 4; - - // A multiplier to number of connections which is used to determine how many packets to read per - // event loop. A reasonable number should allow the listener to process enough payload but not - // starve TCP and other UDP sockets and also prevent long event loop duration. - // The default value is 32. This means if there are N QUIC connections, the total number of - // packets to read in each read event will be 32 * N. - // The actual number of packets to read in total by the UDP listener is also - // bound by 6000, regardless of this field or how many connections there are. - google.protobuf.UInt32Value packets_to_read_to_connection_count_ratio = 5 - [(validate.rules).uint32 = {gte: 1}]; - - // Configure which implementation of `quic::QuicCryptoClientStreamBase` to be used for this listener. - // If not specified the :ref:`QUICHE default one configured by ` will be used. - // [#extension-category: envoy.quic.server.crypto_stream] - core.v4alpha.TypedExtensionConfig crypto_stream_config = 6; - - // Configure which implementation of `quic::ProofSource` to be used for this listener. - // If not specified the :ref:`default one configured by ` will be used. 
- // [#extension-category: envoy.quic.proof_source] - core.v4alpha.TypedExtensionConfig proof_source_config = 7; -} diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/udp_listener_config.proto b/generated_api_shadow/envoy/config/listener/v4alpha/udp_listener_config.proto deleted file mode 100644 index 3cd272de3172e..0000000000000 --- a/generated_api_shadow/envoy/config/listener/v4alpha/udp_listener_config.proto +++ /dev/null @@ -1,46 +0,0 @@ -syntax = "proto3"; - -package envoy.config.listener.v4alpha; - -import "envoy/config/core/v4alpha/udp_socket_config.proto"; -import "envoy/config/listener/v4alpha/quic_config.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; -option java_outer_classname = "UdpListenerConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: UDP listener config] -// Listener :ref:`configuration overview ` - -// [#next-free-field: 8] -message UdpListenerConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.listener.v3.UdpListenerConfig"; - - reserved 1, 2, 3, 4, 6; - - reserved "config"; - - // UDP socket configuration for the listener. The default for - // :ref:`prefer_gro ` is false for - // listener sockets. If receiving a large amount of datagrams from a small number of sources, it - // may be worthwhile to enable this option after performance testing. - core.v4alpha.UdpSocketConfig downstream_socket_config = 5; - - // Configuration for QUIC protocol. If empty, QUIC will not be enabled on this listener. Set - // to the default object to enable QUIC without modifying any additional options. - // - // .. warning:: - // QUIC support is currently alpha and should be used with caution. Please - // see :ref:`here ` for details. 
- QuicProtocolOptions quic_options = 7; -} - -message ActiveRawUdpListenerConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.listener.v3.ActiveRawUdpListenerConfig"; -} diff --git a/generated_api_shadow/envoy/config/metrics/v3/metrics_service.proto b/generated_api_shadow/envoy/config/metrics/v3/metrics_service.proto index 1cdd6d183e9db..df3c71e6a6308 100644 --- a/generated_api_shadow/envoy/config/metrics/v3/metrics_service.proto +++ b/generated_api_shadow/envoy/config/metrics/v3/metrics_service.proto @@ -21,6 +21,17 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Metrics Service is configured as a built-in *envoy.stat_sinks.metrics_service* :ref:`StatsSink // `. This opaque configuration will be used to create // Metrics Service. +// +// Example: +// +// .. code-block:: yaml +// +// stats_sinks: +// - name: envoy.stat_sinks.metrics_service +// typed_config: +// "@type": type.googleapis.com/envoy.config.metrics.v3.MetricsServiceConfig +// transport_api_version: V3 +// // [#extension: envoy.stat_sinks.metrics_service] message MetricsServiceConfig { option (udpa.annotations.versioning).previous_message_type = diff --git a/generated_api_shadow/envoy/config/metrics/v4alpha/BUILD b/generated_api_shadow/envoy/config/metrics/v4alpha/BUILD deleted file mode 100644 index 9f8473e290ae3..0000000000000 --- a/generated_api_shadow/envoy/config/metrics/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/metrics/v3:pkg", - "//envoy/type/matcher/v4alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/metrics/v4alpha/metrics_service.proto b/generated_api_shadow/envoy/config/metrics/v4alpha/metrics_service.proto deleted file mode 100644 index fe530b34e6908..0000000000000 --- a/generated_api_shadow/envoy/config/metrics/v4alpha/metrics_service.proto +++ /dev/null @@ -1,46 +0,0 @@ -syntax = "proto3"; - -package envoy.config.metrics.v4alpha; - -import "envoy/config/core/v4alpha/config_source.proto"; -import "envoy/config/core/v4alpha/grpc_service.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.metrics.v4alpha"; -option java_outer_classname = "MetricsServiceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Metrics service] - -// Metrics Service is configured as a built-in *envoy.stat_sinks.metrics_service* :ref:`StatsSink -// `. This opaque configuration will be used to create -// Metrics Service. -// [#extension: envoy.stat_sinks.metrics_service] -message MetricsServiceConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.metrics.v3.MetricsServiceConfig"; - - // The upstream gRPC cluster that hosts the metrics service. - core.v4alpha.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; - - // API version for metric service transport protocol. This describes the metric service gRPC - // endpoint and version of messages used on the wire. 
- core.v4alpha.ApiVersion transport_api_version = 3 [(validate.rules).enum = {defined_only: true}]; - - // If true, counters are reported as the delta between flushing intervals. Otherwise, the current - // counter value is reported. Defaults to false. - // Eventually (https://github.com/envoyproxy/envoy/issues/10968) if this value is not set, the - // sink will take updates from the :ref:`MetricsResponse `. - google.protobuf.BoolValue report_counters_as_deltas = 2; - - // If true, metrics will have their tags emitted as labels on the metrics objects sent to the MetricsService, - // and the tag extracted name will be used instead of the full name, which may contain values used by the tag - // extractor or additional tags added during stats creation. - bool emit_tags_as_labels = 4; -} diff --git a/generated_api_shadow/envoy/config/metrics/v4alpha/stats.proto b/generated_api_shadow/envoy/config/metrics/v4alpha/stats.proto deleted file mode 100644 index 6d8a94050d65a..0000000000000 --- a/generated_api_shadow/envoy/config/metrics/v4alpha/stats.proto +++ /dev/null @@ -1,411 +0,0 @@ -syntax = "proto3"; - -package envoy.config.metrics.v4alpha; - -import "envoy/config/core/v4alpha/address.proto"; -import "envoy/type/matcher/v4alpha/string.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.metrics.v4alpha"; -option java_outer_classname = "StatsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Stats] -// Statistics :ref:`architecture overview `. - -// Configuration for pluggable stats sinks. 
-message StatsSink { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v3.StatsSink"; - - reserved 2; - - reserved "config"; - - // The name of the stats sink to instantiate. The name must match a supported - // stats sink. - // See the :ref:`extensions listed in typed_config below ` for the default list of available stats sink. - // Sinks optionally support tagged/multiple dimensional metrics. - string name = 1; - - // Stats sink specific configuration which depends on the sink being instantiated. See - // :ref:`StatsdSink ` for an example. - // [#extension-category: envoy.stats_sinks] - oneof config_type { - google.protobuf.Any typed_config = 3; - } -} - -// Statistics configuration such as tagging. -message StatsConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.metrics.v3.StatsConfig"; - - // Each stat name is iteratively processed through these tag specifiers. - // When a tag is matched, the first capture group is removed from the name so - // later :ref:`TagSpecifiers ` cannot match that - // same portion of the match. - repeated TagSpecifier stats_tags = 1; - - // Use all default tag regexes specified in Envoy. These can be combined with - // custom tags specified in :ref:`stats_tags - // `. They will be processed before - // the custom tags. - // - // .. note:: - // - // If any default tags are specified twice, the config will be considered - // invalid. - // - // See :repo:`well_known_names.h ` for a list of the - // default tags in Envoy. - // - // If not provided, the value is assumed to be true. - google.protobuf.BoolValue use_all_default_tags = 2; - - // Inclusion/exclusion matcher for stat name creation. If not provided, all stats are instantiated - // as normal. Preventing the instantiation of certain families of stats can improve memory - // performance for Envoys running especially large configs. - // - // .. 
warning:: - // Excluding stats may affect Envoy's behavior in undocumented ways. See - // `issue #8771 `_ for more information. - // If any unexpected behavior changes are observed, please open a new issue immediately. - StatsMatcher stats_matcher = 3; - - // Defines rules for setting the histogram buckets. Rules are evaluated in order, and the first - // match is applied. If no match is found (or if no rules are set), the following default buckets - // are used: - // - // .. code-block:: json - // - // [ - // 0.5, - // 1, - // 5, - // 10, - // 25, - // 50, - // 100, - // 250, - // 500, - // 1000, - // 2500, - // 5000, - // 10000, - // 30000, - // 60000, - // 300000, - // 600000, - // 1800000, - // 3600000 - // ] - repeated HistogramBucketSettings histogram_bucket_settings = 4; -} - -// Configuration for disabling stat instantiation. -message StatsMatcher { - // The instantiation of stats is unrestricted by default. If the goal is to configure Envoy to - // instantiate all stats, there is no need to construct a StatsMatcher. - // - // However, StatsMatcher can be used to limit the creation of families of stats in order to - // conserve memory. Stats can either be disabled entirely, or they can be - // limited by either an exclusion or an inclusion list of :ref:`StringMatcher - // ` protos: - // - // * If `reject_all` is set to `true`, no stats will be instantiated. If `reject_all` is set to - // `false`, all stats will be instantiated. - // - // * If an exclusion list is supplied, any stat name matching *any* of the StringMatchers in the - // list will not instantiate. - // - // * If an inclusion list is supplied, no stats will instantiate, except those matching *any* of - // the StringMatchers in the list. - // - // - // A StringMatcher can be used to match against an exact string, a suffix / prefix, or a regex. - // **NB:** For performance reasons, it is highly recommended to use a prefix- or suffix-based - // matcher rather than a regex-based matcher. 
- // - // Example 1. Excluding all stats. - // - // .. code-block:: json - // - // { - // "statsMatcher": { - // "rejectAll": "true" - // } - // } - // - // Example 2. Excluding all cluster-specific stats, but not cluster-manager stats: - // - // .. code-block:: json - // - // { - // "statsMatcher": { - // "exclusionList": { - // "patterns": [ - // { - // "prefix": "cluster." - // } - // ] - // } - // } - // } - // - // Example 3. Including only manager-related stats: - // - // .. code-block:: json - // - // { - // "statsMatcher": { - // "inclusionList": { - // "patterns": [ - // { - // "prefix": "cluster_manager." - // }, - // { - // "prefix": "listener_manager." - // } - // ] - // } - // } - // } - // - - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.metrics.v3.StatsMatcher"; - - oneof stats_matcher { - option (validate.required) = true; - - // If `reject_all` is true, then all stats are disabled. If `reject_all` is false, then all - // stats are enabled. - bool reject_all = 1; - - // Exclusive match. All stats are enabled except for those matching one of the supplied - // StringMatcher protos. - type.matcher.v4alpha.ListStringMatcher exclusion_list = 2; - - // Inclusive match. No stats are enabled except for those matching one of the supplied - // StringMatcher protos. - type.matcher.v4alpha.ListStringMatcher inclusion_list = 3; - } -} - -// Designates a tag name and value pair. The value may be either a fixed value -// or a regex providing the value via capture groups. The specified tag will be -// unconditionally set if a fixed value, otherwise it will only be set if one -// or more capture groups in the regex match. -message TagSpecifier { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.metrics.v3.TagSpecifier"; - - // Attaches an identifier to the tag values to identify the tag being in the - // sink. 
Envoy has a set of default names and regexes to extract dynamic - // portions of existing stats, which can be found in :repo:`well_known_names.h - // ` in the Envoy repository. If a :ref:`tag_name - // ` is provided in the config and - // neither :ref:`regex ` or - // :ref:`fixed_value ` were specified, - // Envoy will attempt to find that name in its set of defaults and use the accompanying regex. - // - // .. note:: - // - // It is invalid to specify the same tag name twice in a config. - string tag_name = 1; - - oneof tag_value { - // Designates a tag to strip from the tag extracted name and provide as a named - // tag value for all statistics. This will only occur if any part of the name - // matches the regex provided with one or more capture groups. - // - // The first capture group identifies the portion of the name to remove. The - // second capture group (which will normally be nested inside the first) will - // designate the value of the tag for the statistic. If no second capture - // group is provided, the first will also be used to set the value of the tag. - // All other capture groups will be ignored. - // - // Example 1. a stat name ``cluster.foo_cluster.upstream_rq_timeout`` and - // one tag specifier: - // - // .. code-block:: json - // - // { - // "tag_name": "envoy.cluster_name", - // "regex": "^cluster\\.((.+?)\\.)" - // } - // - // Note that the regex will remove ``foo_cluster.`` making the tag extracted - // name ``cluster.upstream_rq_timeout`` and the tag value for - // ``envoy.cluster_name`` will be ``foo_cluster`` (note: there will be no - // ``.`` character because of the second capture group). - // - // Example 2. a stat name - // ``http.connection_manager_1.user_agent.ios.downstream_cx_total`` and two - // tag specifiers: - // - // .. 
code-block:: json - // - // [ - // { - // "tag_name": "envoy.http_user_agent", - // "regex": "^http(?=\\.).*?\\.user_agent\\.((.+?)\\.)\\w+?$" - // }, - // { - // "tag_name": "envoy.http_conn_manager_prefix", - // "regex": "^http\\.((.*?)\\.)" - // } - // ] - // - // The two regexes of the specifiers will be processed in the definition order. - // - // The first regex will remove ``ios.``, leaving the tag extracted name - // ``http.connection_manager_1.user_agent.downstream_cx_total``. The tag - // ``envoy.http_user_agent`` will be added with tag value ``ios``. - // - // The second regex will remove ``connection_manager_1.`` from the tag - // extracted name produced by the first regex - // ``http.connection_manager_1.user_agent.downstream_cx_total``, leaving - // ``http.user_agent.downstream_cx_total`` as the tag extracted name. The tag - // ``envoy.http_conn_manager_prefix`` will be added with the tag value - // ``connection_manager_1``. - string regex = 2 [(validate.rules).string = {max_bytes: 1024}]; - - // Specifies a fixed tag value for the ``tag_name``. - string fixed_value = 3; - } -} - -// Specifies a matcher for stats and the buckets that matching stats should use. -message HistogramBucketSettings { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.metrics.v3.HistogramBucketSettings"; - - // The stats that this rule applies to. The match is applied to the original stat name - // before tag-extraction, for example `cluster.exampleclustername.upstream_cx_length_ms`. - type.matcher.v4alpha.StringMatcher match = 1 [(validate.rules).message = {required: true}]; - - // Each value is the upper bound of a bucket. Each bucket must be greater than 0 and unique. - // The order of the buckets does not matter. - repeated double buckets = 2 [(validate.rules).repeated = { - min_items: 1 - unique: true - items {double {gt: 0.0}} - }]; -} - -// Stats configuration proto schema for built-in *envoy.stat_sinks.statsd* sink. 
This sink does not support -// tagged metrics. -// [#extension: envoy.stat_sinks.statsd] -message StatsdSink { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v3.StatsdSink"; - - oneof statsd_specifier { - option (validate.required) = true; - - // The UDP address of a running `statsd `_ - // compliant listener. If specified, statistics will be flushed to this - // address. - core.v4alpha.Address address = 1; - - // The name of a cluster that is running a TCP `statsd - // `_ compliant listener. If specified, - // Envoy will connect to this cluster to flush statistics. - string tcp_cluster_name = 2; - } - - // Optional custom prefix for StatsdSink. If - // specified, this will override the default prefix. - // For example: - // - // .. code-block:: json - // - // { - // "prefix" : "envoy-prod" - // } - // - // will change emitted stats to - // - // .. code-block:: cpp - // - // envoy-prod.test_counter:1|c - // envoy-prod.test_timer:5|ms - // - // Note that the default prefix, "envoy", will be used if a prefix is not - // specified. - // - // Stats with default prefix: - // - // .. code-block:: cpp - // - // envoy.test_counter:1|c - // envoy.test_timer:5|ms - string prefix = 3; -} - -// Stats configuration proto schema for built-in *envoy.stat_sinks.dog_statsd* sink. -// The sink emits stats with `DogStatsD `_ -// compatible tags. Tags are configurable via :ref:`StatsConfig -// `. -// [#extension: envoy.stat_sinks.dog_statsd] -message DogStatsdSink { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.metrics.v3.DogStatsdSink"; - - reserved 2; - - oneof dog_statsd_specifier { - option (validate.required) = true; - - // The UDP address of a running DogStatsD compliant listener. If specified, - // statistics will be flushed to this address. - core.v4alpha.Address address = 1; - } - - // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field - // ` for more details. 
- string prefix = 3; - - // Optional max datagram size to use when sending UDP messages. By default Envoy - // will emit one metric per datagram. By specifying a max-size larger than a single - // metric, Envoy will emit multiple, new-line separated metrics. The max datagram - // size should not exceed your network's MTU. - // - // Note that this value may not be respected if smaller than a single metric. - google.protobuf.UInt64Value max_bytes_per_datagram = 4 [(validate.rules).uint64 = {gt: 0}]; -} - -// Stats configuration proto schema for built-in *envoy.stat_sinks.hystrix* sink. -// The sink emits stats in `text/event-stream -// `_ -// formatted stream for use by `Hystrix dashboard -// `_. -// -// Note that only a single HystrixSink should be configured. -// -// Streaming is started through an admin endpoint :http:get:`/hystrix_event_stream`. -// [#extension: envoy.stat_sinks.hystrix] -message HystrixSink { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.metrics.v3.HystrixSink"; - - // The number of buckets the rolling statistical window is divided into. - // - // Each time the sink is flushed, all relevant Envoy statistics are sampled and - // added to the rolling window (removing the oldest samples in the window - // in the process). The sink then outputs the aggregate statistics across the - // current rolling window to the event stream(s). - // - // rolling_window(ms) = stats_flush_interval(ms) * num_of_buckets - // - // More detailed explanation can be found in `Hystrix wiki - // `_. 
- int64 num_buckets = 1; -} diff --git a/generated_api_shadow/envoy/config/overload/v3/overload.proto b/generated_api_shadow/envoy/config/overload/v3/overload.proto index 1211e1cd49ddd..5ff2222987f6f 100644 --- a/generated_api_shadow/envoy/config/overload/v3/overload.proto +++ b/generated_api_shadow/envoy/config/overload/v3/overload.proto @@ -142,6 +142,26 @@ message OverloadAction { google.protobuf.Any typed_config = 3; } +// Configuration for which accounts the WatermarkBuffer Factories should +// track. +message BufferFactoryConfig { + // The minimum power of two at which Envoy starts tracking an account. + // + // Envoy has 8 power of two buckets starting with the provided exponent below. + // Concretely the 1st bucket contains accounts for streams that use + // [2^minimum_account_to_track_power_of_two, + // 2^(minimum_account_to_track_power_of_two + 1)) bytes. + // With the 8th bucket tracking accounts + // >= 128 * 2^minimum_account_to_track_power_of_two. + // + // The maximum value is 56, since we're using uint64_t for bytes counting, + // and that's the last value that would use the 8 buckets. In practice, + // we don't expect the proxy to be holding 2^56 bytes. + // + // If omitted, Envoy should not do any tracking. + uint32 minimum_account_to_track_power_of_two = 1 [(validate.rules).uint32 = {lte: 56 gte: 10}]; +} + message OverloadManager { option (udpa.annotations.versioning).previous_message_type = "envoy.config.overload.v2alpha.OverloadManager"; @@ -154,4 +174,7 @@ message OverloadManager { // The set of overload actions. repeated OverloadAction actions = 3; + + // Configuration for buffer factory. + BufferFactoryConfig buffer_factory_config = 4; } diff --git a/generated_api_shadow/envoy/config/ratelimit/v4alpha/BUILD b/generated_api_shadow/envoy/config/ratelimit/v4alpha/BUILD deleted file mode 100644 index f335ebe20e6b2..0000000000000 --- a/generated_api_shadow/envoy/config/ratelimit/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. 
This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/ratelimit/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/ratelimit/v4alpha/rls.proto b/generated_api_shadow/envoy/config/ratelimit/v4alpha/rls.proto deleted file mode 100644 index 7a13efd7395e4..0000000000000 --- a/generated_api_shadow/envoy/config/ratelimit/v4alpha/rls.proto +++ /dev/null @@ -1,34 +0,0 @@ -syntax = "proto3"; - -package envoy.config.ratelimit.v4alpha; - -import "envoy/config/core/v4alpha/config_source.proto"; -import "envoy/config/core/v4alpha/grpc_service.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.ratelimit.v4alpha"; -option java_outer_classname = "RlsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Rate limit service] - -// Rate limit :ref:`configuration overview `. -message RateLimitServiceConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.ratelimit.v3.RateLimitServiceConfig"; - - reserved 1, 3; - - // Specifies the gRPC service that hosts the rate limit service. The client - // will connect to this cluster when it needs to make rate limit service - // requests. - core.v4alpha.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; - - // API version for rate limit transport protocol. This describes the rate limit gRPC endpoint and - // version of messages used on the wire. 
- core.v4alpha.ApiVersion transport_api_version = 4 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/generated_api_shadow/envoy/config/rbac/v3/BUILD b/generated_api_shadow/envoy/config/rbac/v3/BUILD index c5246439c7b55..c289def1f11d2 100644 --- a/generated_api_shadow/envoy/config/rbac/v3/BUILD +++ b/generated_api_shadow/envoy/config/rbac/v3/BUILD @@ -10,6 +10,7 @@ api_proto_package( "//envoy/config/core/v3:pkg", "//envoy/config/route/v3:pkg", "//envoy/type/matcher/v3:pkg", + "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto", "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", diff --git a/generated_api_shadow/envoy/config/rbac/v3/rbac.proto b/generated_api_shadow/envoy/config/rbac/v3/rbac.proto index 44b3cf7cee6ec..d66f9be2b4981 100644 --- a/generated_api_shadow/envoy/config/rbac/v3/rbac.proto +++ b/generated_api_shadow/envoy/config/rbac/v3/rbac.proto @@ -7,6 +7,7 @@ import "envoy/config/route/v3/route_components.proto"; import "envoy/type/matcher/v3/metadata.proto"; import "envoy/type/matcher/v3/path.proto"; import "envoy/type/matcher/v3/string.proto"; +import "envoy/type/v3/range.proto"; import "google/api/expr/v1alpha1/checked.proto"; import "google/api/expr/v1alpha1/syntax.proto"; @@ -145,7 +146,7 @@ message Policy { } // Permission defines an action (or actions) that a principal can take. -// [#next-free-field: 11] +// [#next-free-field: 12] message Permission { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Permission"; @@ -185,6 +186,9 @@ message Permission { // A port number that describes the destination port connecting to. uint32 destination_port = 6 [(validate.rules).uint32 = {lte: 65535}]; + // A port number range that describes a range of destination ports connecting to. + type.v3.Int32Range destination_port_range = 11; + // Metadata that describes additional information about the action. 
type.matcher.v3.MetadataMatcher metadata = 7; diff --git a/generated_api_shadow/envoy/config/rbac/v4alpha/BUILD b/generated_api_shadow/envoy/config/rbac/v4alpha/BUILD deleted file mode 100644 index ddf34cc1032bc..0000000000000 --- a/generated_api_shadow/envoy/config/rbac/v4alpha/BUILD +++ /dev/null @@ -1,18 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/rbac/v3:pkg", - "//envoy/config/route/v4alpha:pkg", - "//envoy/type/matcher/v4alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto", - "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", - ], -) diff --git a/generated_api_shadow/envoy/config/rbac/v4alpha/rbac.proto b/generated_api_shadow/envoy/config/rbac/v4alpha/rbac.proto deleted file mode 100644 index 3b27e68bba1dc..0000000000000 --- a/generated_api_shadow/envoy/config/rbac/v4alpha/rbac.proto +++ /dev/null @@ -1,301 +0,0 @@ -syntax = "proto3"; - -package envoy.config.rbac.v4alpha; - -import "envoy/config/core/v4alpha/address.proto"; -import "envoy/config/route/v4alpha/route_components.proto"; -import "envoy/type/matcher/v4alpha/metadata.proto"; -import "envoy/type/matcher/v4alpha/path.proto"; -import "envoy/type/matcher/v4alpha/string.proto"; - -import "google/api/expr/v1alpha1/checked.proto"; -import "google/api/expr/v1alpha1/syntax.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.rbac.v4alpha"; -option java_outer_classname = "RbacProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = 
NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Role Based Access Control (RBAC)] - -// Role Based Access Control (RBAC) provides service-level and method-level access control for a -// service. Requests are allowed or denied based on the `action` and whether a matching policy is -// found. For instance, if the action is ALLOW and a matching policy is found the request should be -// allowed. -// -// RBAC can also be used to make access logging decisions by communicating with access loggers -// through dynamic metadata. When the action is LOG and at least one policy matches, the -// `access_log_hint` value in the shared key namespace 'envoy.common' is set to `true` indicating -// the request should be logged. -// -// Here is an example of RBAC configuration. It has two policies: -// -// * Service account "cluster.local/ns/default/sa/admin" has full access to the service, and so -// does "cluster.local/ns/default/sa/superuser". -// -// * Any user can read ("GET") the service at paths with prefix "/products", so long as the -// destination port is either 80 or 443. -// -// .. code-block:: yaml -// -// action: ALLOW -// policies: -// "service-admin": -// permissions: -// - any: true -// principals: -// - authenticated: -// principal_name: -// exact: "cluster.local/ns/default/sa/admin" -// - authenticated: -// principal_name: -// exact: "cluster.local/ns/default/sa/superuser" -// "product-viewer": -// permissions: -// - and_rules: -// rules: -// - header: -// name: ":method" -// string_match: -// exact: "GET" -// - url_path: -// path: { prefix: "/products" } -// - or_rules: -// rules: -// - destination_port: 80 -// - destination_port: 443 -// principals: -// - any: true -// -message RBAC { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v3.RBAC"; - - // Should we do safe-list or block-list style access control? - enum Action { - // The policies grant access to principals. The rest are denied. 
This is safe-list style - // access control. This is the default type. - ALLOW = 0; - - // The policies deny access to principals. The rest are allowed. This is block-list style - // access control. - DENY = 1; - - // The policies set the `access_log_hint` dynamic metadata key based on if requests match. - // All requests are allowed. - LOG = 2; - } - - // The action to take if a policy matches. Every action either allows or denies a request, - // and can also carry out action-specific operations. - // - // Actions: - // - // * ALLOW: Allows the request if and only if there is a policy that matches - // the request. - // * DENY: Allows the request if and only if there are no policies that - // match the request. - // * LOG: Allows all requests. If at least one policy matches, the dynamic - // metadata key `access_log_hint` is set to the value `true` under the shared - // key namespace 'envoy.common'. If no policies match, it is set to `false`. - // Other actions do not modify this key. - // - Action action = 1 [(validate.rules).enum = {defined_only: true}]; - - // Maps from policy name to policy. A match occurs when at least one policy matches the request. - // The policies are evaluated in lexicographic order of the policy name. - map policies = 2; -} - -// Policy specifies a role and the principals that are assigned/denied the role. -// A policy matches if and only if at least one of its permissions match the -// action taking place AND at least one of its principals match the downstream -// AND the condition is true if specified. -message Policy { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v3.Policy"; - - // Required. The set of permissions that define a role. Each permission is - // matched with OR semantics. To match all actions for this policy, a single - // Permission with the `any` field set to true should be used. - repeated Permission permissions = 1 [(validate.rules).repeated = {min_items: 1}]; - - // Required. 
The set of principals that are assigned/denied the role based on - // “action”. Each principal is matched with OR semantics. To match all - // downstreams for this policy, a single Principal with the `any` field set to - // true should be used. - repeated Principal principals = 2 [(validate.rules).repeated = {min_items: 1}]; - - oneof expression_specifier { - // An optional symbolic expression specifying an access control - // :ref:`condition `. The condition is combined - // with the permissions and the principals as a clause with AND semantics. - // Only be used when checked_condition is not used. - google.api.expr.v1alpha1.Expr condition = 3; - - // [#not-implemented-hide:] - // An optional symbolic expression that has been successfully type checked. - // Only be used when condition is not used. - google.api.expr.v1alpha1.CheckedExpr checked_condition = 4; - } -} - -// Permission defines an action (or actions) that a principal can take. -// [#next-free-field: 11] -message Permission { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v3.Permission"; - - // Used in the `and_rules` and `or_rules` fields in the `rule` oneof. Depending on the context, - // each are applied with the associated behavior. - message Set { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.rbac.v3.Permission.Set"; - - repeated Permission rules = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - oneof rule { - option (validate.required) = true; - - // A set of rules that all must match in order to define the action. - Set and_rules = 1; - - // A set of rules where at least one must match in order to define the action. - Set or_rules = 2; - - // When any is set, it matches any action. - bool any = 3 [(validate.rules).bool = {const: true}]; - - // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only - // available for HTTP request. 
- // Note: the pseudo-header :path includes the query and fragment string. Use the `url_path` - // field if you want to match the URL path without the query and fragment string. - route.v4alpha.HeaderMatcher header = 4; - - // A URL path on the incoming HTTP request. Only available for HTTP. - type.matcher.v4alpha.PathMatcher url_path = 10; - - // A CIDR block that describes the destination IP. - core.v4alpha.CidrRange destination_ip = 5; - - // A port number that describes the destination port connecting to. - uint32 destination_port = 6 [(validate.rules).uint32 = {lte: 65535}]; - - // Metadata that describes additional information about the action. - type.matcher.v4alpha.MetadataMatcher metadata = 7; - - // Negates matching the provided permission. For instance, if the value of - // `not_rule` would match, this permission would not match. Conversely, if - // the value of `not_rule` would not match, this permission would match. - Permission not_rule = 8; - - // The request server from the client's connection request. This is - // typically TLS SNI. - // - // .. attention:: - // - // The behavior of this field may be affected by how Envoy is configured - // as explained below. - // - // * If the :ref:`TLS Inspector ` - // filter is not added, and if a `FilterChainMatch` is not defined for - // the :ref:`server name - // `, - // a TLS connection's requested SNI server name will be treated as if it - // wasn't present. - // - // * A :ref:`listener filter ` may - // overwrite a connection's requested server name within Envoy. - // - // Please refer to :ref:`this FAQ entry ` to learn to - // setup SNI. - type.matcher.v4alpha.StringMatcher requested_server_name = 9; - } -} - -// Principal defines an identity or a group of identities for a downstream -// subject. 
-// [#next-free-field: 12] -message Principal { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v3.Principal"; - - // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. - // Depending on the context, each are applied with the associated behavior. - message Set { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.rbac.v3.Principal.Set"; - - repeated Principal ids = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - // Authentication attributes for a downstream. - message Authenticated { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.rbac.v3.Principal.Authenticated"; - - reserved 1; - - // The name of the principal. If set, The URI SAN or DNS SAN in that order - // is used from the certificate, otherwise the subject field is used. If - // unset, it applies to any user that is authenticated. - type.matcher.v4alpha.StringMatcher principal_name = 2; - } - - oneof identifier { - option (validate.required) = true; - - // A set of identifiers that all must match in order to define the - // downstream. - Set and_ids = 1; - - // A set of identifiers at least one must match in order to define the - // downstream. - Set or_ids = 2; - - // When any is set, it matches any downstream. - bool any = 3 [(validate.rules).bool = {const: true}]; - - // Authenticated attributes that identify the downstream. - Authenticated authenticated = 4; - - // A CIDR block that describes the downstream IP. - // This address will honor proxy protocol, but will not honor XFF. - core.v4alpha.CidrRange hidden_envoy_deprecated_source_ip = 5 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // A CIDR block that describes the downstream remote/origin address. - // Note: This is always the physical peer even if the - // :ref:`remote_ip ` is - // inferred from for example the x-forwarder-for header, proxy protocol, - // etc. 
- core.v4alpha.CidrRange direct_remote_ip = 10; - - // A CIDR block that describes the downstream remote/origin address. - // Note: This may not be the physical peer and could be different from the - // :ref:`direct_remote_ip - // `. E.g, if the - // remote ip is inferred from for example the x-forwarder-for header, proxy - // protocol, etc. - core.v4alpha.CidrRange remote_ip = 11; - - // A header (or pseudo-header such as :path or :method) on the incoming HTTP - // request. Only available for HTTP request. Note: the pseudo-header :path - // includes the query and fragment string. Use the `url_path` field if you - // want to match the URL path without the query and fragment string. - route.v4alpha.HeaderMatcher header = 6; - - // A URL path on the incoming HTTP request. Only available for HTTP. - type.matcher.v4alpha.PathMatcher url_path = 9; - - // Metadata that describes additional information about the principal. - type.matcher.v4alpha.MetadataMatcher metadata = 7; - - // Negates matching the provided principal. For instance, if the value of - // `not_id` would match, this principal would not match. Conversely, if the - // value of `not_id` would not match, this principal would match. 
- Principal not_id = 8; - } -} diff --git a/generated_api_shadow/envoy/config/route/v3/route_components.proto b/generated_api_shadow/envoy/config/route/v3/route_components.proto index 93ae347ace732..8930f9ec8dff3 100644 --- a/generated_api_shadow/envoy/config/route/v3/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v3/route_components.proto @@ -5,6 +5,7 @@ package envoy.config.route.v3; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/extension.proto"; import "envoy/config/core/v3/proxy_protocol.proto"; +import "envoy/type/matcher/v3/metadata.proto"; import "envoy/type/matcher/v3/regex.proto"; import "envoy/type/matcher/v3/string.proto"; import "envoy/type/metadata/v3/metadata.proto"; @@ -314,16 +315,38 @@ message Route { message WeightedCluster { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.WeightedCluster"; - // [#next-free-field: 12] + // [#next-free-field: 13] message ClusterWeight { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.WeightedCluster.ClusterWeight"; reserved 7; + // Only one of *name* and *cluster_header* may be specified. + // [#next-major-version: Need to add back the validation rule: (validate.rules).string = {min_len: 1}] // Name of the upstream cluster. The cluster must exist in the // :ref:`cluster manager configuration `. - string name = 1 [(validate.rules).string = {min_len: 1}]; + string name = 1 [(udpa.annotations.field_migrate).oneof_promotion = "cluster_specifier"]; + + // Only one of *name* and *cluster_header* may be specified. + // [#next-major-version: Need to add back the validation rule: (validate.rules).string = {min_len: 1 }] + // Envoy will determine the cluster to route to by reading the value of the + // HTTP header named by cluster_header from the request headers. If the + // header is not found or the referenced cluster does not exist, Envoy will + // return a 404 response. + // + // .. 
attention:: + // + // Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 + // *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. + string cluster_header = 12 [ + (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}, + (udpa.annotations.field_migrate).oneof_promotion = "cluster_specifier" + ]; // An integer between 0 and :ref:`total_weight // `. When a request matches the route, @@ -409,7 +432,7 @@ message WeightedCluster { string runtime_key_prefix = 2; } -// [#next-free-field: 13] +// [#next-free-field: 14] message RouteMatch { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteMatch"; @@ -527,6 +550,12 @@ message RouteMatch { // // [#next-major-version: unify with RBAC] TlsContextMatchOptions tls_context = 11; + + // Specifies a set of dynamic metadata matchers on which the route should match. + // The router will check the dynamic metadata against all the specified dynamic metadata matchers. + // If the number of specified dynamic metadata matchers is nonzero, they all must match the + // dynamic metadata for a match to occur. + repeated type.matcher.v3.MetadataMatcher dynamic_metadata = 13; } // [#next-free-field: 12] diff --git a/generated_api_shadow/envoy/config/route/v4alpha/BUILD b/generated_api_shadow/envoy/config/route/v4alpha/BUILD deleted file mode 100644 index 569a1a438e075..0000000000000 --- a/generated_api_shadow/envoy/config/route/v4alpha/BUILD +++ /dev/null @@ -1,18 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/route/v3:pkg", - "//envoy/type/matcher/v4alpha:pkg", - "//envoy/type/metadata/v3:pkg", - "//envoy/type/tracing/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/route/v4alpha/route.proto b/generated_api_shadow/envoy/config/route/v4alpha/route.proto deleted file mode 100644 index 4a19386824821..0000000000000 --- a/generated_api_shadow/envoy/config/route/v4alpha/route.proto +++ /dev/null @@ -1,146 +0,0 @@ -syntax = "proto3"; - -package envoy.config.route.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/config_source.proto"; -import "envoy/config/core/v4alpha/extension.proto"; -import "envoy/config/route/v4alpha/route_components.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.route.v4alpha"; -option java_outer_classname = "RouteProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: HTTP route configuration] -// * Routing :ref:`architecture overview ` -// * HTTP :ref:`router filter ` - -// [#next-free-field: 13] -message RouteConfiguration { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RouteConfiguration"; - - // The name of the route configuration. For example, it might match - // :ref:`route_config_name - // ` in - // :ref:`envoy_v3_api_msg_extensions.filters.network.http_connection_manager.v3.Rds`. - string name = 1; - - // An array of virtual hosts that make up the route table. 
- repeated VirtualHost virtual_hosts = 2; - - // An array of virtual hosts will be dynamically loaded via the VHDS API. - // Both *virtual_hosts* and *vhds* fields will be used when present. *virtual_hosts* can be used - // for a base routing table or for infrequently changing virtual hosts. *vhds* is used for - // on-demand discovery of virtual hosts. The contents of these two fields will be merged to - // generate a routing table for a given RouteConfiguration, with *vhds* derived configuration - // taking precedence. - Vhds vhds = 9; - - // Optionally specifies a list of HTTP headers that the connection manager - // will consider to be internal only. If they are found on external requests they will be cleaned - // prior to filter invocation. See :ref:`config_http_conn_man_headers_x-envoy-internal` for more - // information. - repeated string internal_only_headers = 3 [ - (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} - ]; - - // Specifies a list of HTTP headers that should be added to each response that - // the connection manager encodes. Headers specified at this level are applied - // after headers from any enclosed :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` or - // :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. For more information, including details on - // header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.v4alpha.HeaderValueOption response_headers_to_add = 4 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each response - // that the connection manager encodes. - repeated string response_headers_to_remove = 5 [ - (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} - ]; - - // Specifies a list of HTTP headers that should be added to each request - // routed by the HTTP connection manager. 
Headers specified at this level are - // applied after headers from any enclosed :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` or - // :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. For more information, including details on - // header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.v4alpha.HeaderValueOption request_headers_to_add = 6 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each request - // routed by the HTTP connection manager. - repeated string request_headers_to_remove = 8 [ - (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} - ]; - - // By default, headers that should be added/removed are evaluated from most to least specific: - // - // * route level - // * virtual host level - // * connection manager level - // - // To allow setting overrides at the route or virtual host level, this order can be reversed - // by setting this option to true. Defaults to false. - // - // [#next-major-version: In the v3 API, this will default to true.] - bool most_specific_header_mutations_wins = 10; - - // An optional boolean that specifies whether the clusters that the route - // table refers to will be validated by the cluster manager. If set to true - // and a route refers to a non-existent cluster, the route table will not - // load. If set to false and a route refers to a non-existent cluster, the - // route table will load and the router filter will return a 404 if the route - // is selected at runtime. This setting defaults to true if the route table - // is statically defined via the :ref:`route_config - // ` - // option. This setting default to false if the route table is loaded dynamically via the - // :ref:`rds - // ` - // option. Users may wish to override the default behavior in certain cases (for example when - // using CDS with a static route table). 
- google.protobuf.BoolValue validate_clusters = 7; - - // The maximum bytes of the response :ref:`direct response body - // ` size. If not specified the default - // is 4096. - // - // .. warning:: - // - // Envoy currently holds the content of :ref:`direct response body - // ` in memory. Be careful setting - // this to be larger than the default 4KB, since the allocated memory for direct response body - // is not subject to data plane buffering controls. - // - google.protobuf.UInt32Value max_direct_response_body_size_bytes = 11; - - // [#not-implemented-hide:] - // A list of plugins and their configurations which may be used by a - // :ref:`envoy_v3_api_field_config.route.v3.RouteAction.cluster_specifier_plugin` - // within the route. All *extension.name* fields in this list must be unique. - repeated ClusterSpecifierPlugin cluster_specifier_plugins = 12; -} - -// Configuration for a cluster specifier plugin. -message ClusterSpecifierPlugin { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.ClusterSpecifierPlugin"; - - // The name of the plugin and its opaque configuration. - core.v4alpha.TypedExtensionConfig extension = 1; -} - -message Vhds { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.Vhds"; - - // Configuration source specifier for VHDS. 
- core.v4alpha.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto deleted file mode 100644 index f728067882451..0000000000000 --- a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto +++ /dev/null @@ -1,2067 +0,0 @@ -syntax = "proto3"; - -package envoy.config.route.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/extension.proto"; -import "envoy/config/core/v4alpha/proxy_protocol.proto"; -import "envoy/type/matcher/v4alpha/regex.proto"; -import "envoy/type/matcher/v4alpha/string.proto"; -import "envoy/type/metadata/v3/metadata.proto"; -import "envoy/type/tracing/v3/custom_tag.proto"; -import "envoy/type/v3/percent.proto"; -import "envoy/type/v3/range.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.route.v4alpha"; -option java_outer_classname = "RouteComponentsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: HTTP route components] -// * Routing :ref:`architecture overview ` -// * HTTP :ref:`router filter ` - -// The top level element in the routing configuration is a virtual host. Each virtual host has -// a logical name as well as a set of domains that get routed to it based on the incoming request's -// host header. This allows a single listener to service multiple top level domain path trees. 
Once -// a virtual host is selected based on the domain, the routes are processed in order to see which -// upstream cluster to route to or whether to perform a redirect. -// [#next-free-field: 21] -message VirtualHost { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.VirtualHost"; - - enum TlsRequirementType { - // No TLS requirement for the virtual host. - NONE = 0; - - // External requests must use TLS. If a request is external and it is not - // using TLS, a 301 redirect will be sent telling the client to use HTTPS. - EXTERNAL_ONLY = 1; - - // All requests must use TLS. If a request is not using TLS, a 301 redirect - // will be sent telling the client to use HTTPS. - ALL = 2; - } - - reserved 9, 12; - - reserved "per_filter_config"; - - // The logical name of the virtual host. This is used when emitting certain - // statistics but is not relevant for routing. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // A list of domains (host/authority header) that will be matched to this - // virtual host. Wildcard hosts are supported in the suffix or prefix form. - // - // Domain search order: - // 1. Exact domain names: ``www.foo.com``. - // 2. Suffix domain wildcards: ``*.foo.com`` or ``*-bar.foo.com``. - // 3. Prefix domain wildcards: ``foo.*`` or ``foo-*``. - // 4. Special wildcard ``*`` matching any domain. - // - // .. note:: - // - // The wildcard will not match the empty string. - // e.g. ``*-bar.foo.com`` will match ``baz-bar.foo.com`` but not ``-bar.foo.com``. - // The longest wildcards match first. - // Only a single virtual host in the entire route configuration can match on ``*``. A domain - // must be unique across all virtual hosts or the config will fail to load. - // - // Domains cannot contain control characters. This is validated by the well_known_regex HTTP_HEADER_VALUE. 
- repeated string domains = 2 [(validate.rules).repeated = { - min_items: 1 - items {string {well_known_regex: HTTP_HEADER_VALUE strict: false}} - }]; - - // The list of routes that will be matched, in order, for incoming requests. - // The first route that matches will be used. - repeated Route routes = 3; - - // Specifies the type of TLS enforcement the virtual host expects. If this option is not - // specified, there is no TLS requirement for the virtual host. - TlsRequirementType require_tls = 4 [(validate.rules).enum = {defined_only: true}]; - - // A list of virtual clusters defined for this virtual host. Virtual clusters - // are used for additional statistics gathering. - repeated VirtualCluster virtual_clusters = 5; - - // Specifies a set of rate limit configurations that will be applied to the - // virtual host. - repeated RateLimit rate_limits = 6; - - // Specifies a list of HTTP headers that should be added to each request - // handled by this virtual host. Headers specified at this level are applied - // after headers from enclosed :ref:`envoy_v3_api_msg_config.route.v3.Route` and before headers from the - // enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including - // details on header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.v4alpha.HeaderValueOption request_headers_to_add = 7 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each request - // handled by this virtual host. - repeated string request_headers_to_remove = 13 [(validate.rules).repeated = { - items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} - }]; - - // Specifies a list of HTTP headers that should be added to each response - // handled by this virtual host. 
Headers specified at this level are applied - // after headers from enclosed :ref:`envoy_v3_api_msg_config.route.v3.Route` and before headers from the - // enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including - // details on header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.v4alpha.HeaderValueOption response_headers_to_add = 10 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each response - // handled by this virtual host. - repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { - items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} - }]; - - // Indicates that the virtual host has a CORS policy. - CorsPolicy cors = 8; - - // The per_filter_config field can be used to provide virtual host-specific - // configurations for filters. The key should match the filter name, such as - // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` - // for if and how it is utilized. - // [#comment: An entry's value may be wrapped in a - // :ref:`FilterConfig` - // message to specify additional options.] - map typed_per_filter_config = 15; - - // Decides whether the :ref:`x-envoy-attempt-count - // ` header should be included - // in the upstream request. Setting this option will cause it to override any existing header - // value, so in the case of two Envoys on the request path with this option enabled, the upstream - // will see the attempt count as perceived by the second Envoy. Defaults to false. - // This header is unaffected by the - // :ref:`suppress_envoy_headers - // ` flag. - // - // [#next-major-version: rename to include_attempt_count_in_request.] 
- bool include_request_attempt_count = 14; - - // Decides whether the :ref:`x-envoy-attempt-count - // ` header should be included - // in the downstream response. Setting this option will cause the router to override any existing header - // value, so in the case of two Envoys on the request path with this option enabled, the downstream - // will see the attempt count as perceived by the Envoy closest upstream from itself. Defaults to false. - // This header is unaffected by the - // :ref:`suppress_envoy_headers - // ` flag. - bool include_attempt_count_in_response = 19; - - // Indicates the retry policy for all routes in this virtual host. Note that setting a - // route level entry will take precedence over this config and it'll be treated - // independently (e.g.: values are not inherited). - RetryPolicy retry_policy = 16; - - // [#not-implemented-hide:] - // Specifies the configuration for retry policy extension. Note that setting a route level entry - // will take precedence over this config and it'll be treated independently (e.g.: values are not - // inherited). :ref:`Retry policy ` should not be - // set if this field is used. - google.protobuf.Any retry_policy_typed_config = 20; - - // Indicates the hedge policy for all routes in this virtual host. Note that setting a - // route level entry will take precedence over this config and it'll be treated - // independently (e.g.: values are not inherited). - HedgePolicy hedge_policy = 17; - - // The maximum bytes which will be buffered for retries and shadowing. - // If set and a route-specific limit is not set, the bytes actually buffered will be the minimum - // value of this and the listener per_connection_buffer_limit_bytes. - google.protobuf.UInt32Value per_request_buffer_limit_bytes = 18; -} - -// A filter-defined action type. 
-message FilterAction { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.FilterAction"; - - google.protobuf.Any action = 1; -} - -// A route is both a specification of how to match a request as well as an indication of what to do -// next (e.g., redirect, forward, rewrite, etc.). -// -// .. attention:: -// -// Envoy supports routing on HTTP method via :ref:`header matching -// `. -// [#next-free-field: 19] -message Route { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.Route"; - - reserved 6, 8; - - reserved "per_filter_config"; - - // Name for the route. - string name = 14; - - // Route matching parameters. - RouteMatch match = 1 [(validate.rules).message = {required: true}]; - - oneof action { - option (validate.required) = true; - - // Route request to some upstream cluster. - RouteAction route = 2; - - // Return a redirect. - RedirectAction redirect = 3; - - // Return an arbitrary HTTP response directly, without proxying. - DirectResponseAction direct_response = 7; - - // [#not-implemented-hide:] - // A filter-defined action (e.g., it could dynamically generate the RouteAction). - // [#comment: TODO(samflattery): Remove cleanup in route_fuzz_test.cc when - // implemented] - FilterAction filter_action = 17; - - // [#not-implemented-hide:] - // An action used when the route will generate a response directly, - // without forwarding to an upstream host. This will be used in non-proxy - // xDS clients like the gRPC server. It could also be used in the future - // in Envoy for a filter that directly generates responses for requests. - NonForwardingAction non_forwarding_action = 18; - } - - // The Metadata field can be used to provide additional information - // about the route. It can be used for configuration, stats, and logging. - // The metadata should go under the filter namespace that will need it. 
- // For instance, if the metadata is intended for the Router filter, - // the filter name should be specified as *envoy.filters.http.router*. - core.v4alpha.Metadata metadata = 4; - - // Decorator for the matched route. - Decorator decorator = 5; - - // The typed_per_filter_config field can be used to provide route-specific - // configurations for filters. The key should match the filter name, such as - // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` for - // if and how it is utilized. - // [#comment: An entry's value may be wrapped in a - // :ref:`FilterConfig` - // message to specify additional options.] - map typed_per_filter_config = 13; - - // Specifies a set of headers that will be added to requests matching this - // route. Headers specified at this level are applied before headers from the - // enclosing :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` and - // :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on - // header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.v4alpha.HeaderValueOption request_headers_to_add = 9 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each request - // matching this route. - repeated string request_headers_to_remove = 12 [(validate.rules).repeated = { - items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} - }]; - - // Specifies a set of headers that will be added to responses to requests - // matching this route. Headers specified at this level are applied before - // headers from the enclosing :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` and - // :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. 
For more information, including - // details on header value syntax, see the documentation on - // :ref:`custom request headers `. - repeated core.v4alpha.HeaderValueOption response_headers_to_add = 10 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each response - // to requests matching this route. - repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { - items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} - }]; - - // Presence of the object defines whether the connection manager's tracing configuration - // is overridden by this route specific instance. - Tracing tracing = 15; - - // The maximum bytes which will be buffered for retries and shadowing. - // If set, the bytes actually buffered will be the minimum value of this and the - // listener per_connection_buffer_limit_bytes. - google.protobuf.UInt32Value per_request_buffer_limit_bytes = 16; -} - -// Compared to the :ref:`cluster ` field that specifies a -// single upstream cluster as the target of a request, the :ref:`weighted_clusters -// ` option allows for specification of -// multiple upstream clusters along with weights that indicate the percentage of -// traffic to be forwarded to each cluster. The router selects an upstream cluster based on the -// weights. -message WeightedCluster { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.WeightedCluster"; - - // [#next-free-field: 12] - message ClusterWeight { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.WeightedCluster.ClusterWeight"; - - reserved 7, 8; - - reserved "per_filter_config"; - - // Name of the upstream cluster. The cluster must exist in the - // :ref:`cluster manager configuration `. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // An integer between 0 and :ref:`total_weight - // `. 
When a request matches the route, - // the choice of an upstream cluster is determined by its weight. The sum of weights across all - // entries in the clusters array must add up to the total_weight, which defaults to 100. - google.protobuf.UInt32Value weight = 2; - - // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in - // the upstream cluster with metadata matching what is set in this field will be considered for - // load balancing. Note that this will be merged with what's provided in - // :ref:`RouteAction.metadata_match `, with - // values here taking precedence. The filter name should be specified as *envoy.lb*. - core.v4alpha.Metadata metadata_match = 3; - - // Specifies a list of headers to be added to requests when this cluster is selected - // through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. - // Headers specified at this level are applied before headers from the enclosing - // :ref:`envoy_v3_api_msg_config.route.v3.Route`, :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`, and - // :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on - // header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.v4alpha.HeaderValueOption request_headers_to_add = 4 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each request when - // this cluster is selected through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. - repeated string request_headers_to_remove = 9 [(validate.rules).repeated = { - items {string {well_known_regex: HTTP_HEADER_NAME strict: false}} - }]; - - // Specifies a list of headers to be added to responses when this cluster is selected - // through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. 
- // Headers specified at this level are applied before headers from the enclosing - // :ref:`envoy_v3_api_msg_config.route.v3.Route`, :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`, and - // :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on - // header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.v4alpha.HeaderValueOption response_headers_to_add = 5 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of headers to be removed from responses when this cluster is selected - // through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. - repeated string response_headers_to_remove = 6 [(validate.rules).repeated = { - items {string {well_known_regex: HTTP_HEADER_NAME strict: false}} - }]; - - // The per_filter_config field can be used to provide weighted cluster-specific - // configurations for filters. The key should match the filter name, such as - // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` - // for if and how it is utilized. - // [#comment: An entry's value may be wrapped in a - // :ref:`FilterConfig` - // message to specify additional options.] - map typed_per_filter_config = 10; - - oneof host_rewrite_specifier { - // Indicates that during forwarding, the host header will be swapped with - // this value. - string host_rewrite_literal = 11 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - } - } - - // Specifies one or more upstream clusters associated with the route. - repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; - - // Specifies the total weight across all clusters. The sum of all cluster weights must equal this - // value, which must be greater than 0. Defaults to 100. 
- google.protobuf.UInt32Value total_weight = 3 [(validate.rules).uint32 = {gte: 1}]; - - // Specifies the runtime key prefix that should be used to construct the - // runtime keys associated with each cluster. When the *runtime_key_prefix* is - // specified, the router will look for weights associated with each upstream - // cluster under the key *runtime_key_prefix* + "." + *cluster[i].name* where - // *cluster[i]* denotes an entry in the clusters array field. If the runtime - // key for the cluster does not exist, the value specified in the - // configuration file will be used as the default weight. See the :ref:`runtime documentation - // ` for how key names map to the underlying implementation. - string runtime_key_prefix = 2; -} - -// [#next-free-field: 13] -message RouteMatch { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RouteMatch"; - - message GrpcRouteMatchOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RouteMatch.GrpcRouteMatchOptions"; - } - - message TlsContextMatchOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RouteMatch.TlsContextMatchOptions"; - - // If specified, the route will match against whether or not a certificate is presented. - // If not specified, certificate presentation status (true or false) will not be considered when route matching. - google.protobuf.BoolValue presented = 1; - - // If specified, the route will match against whether or not a certificate is validated. - // If not specified, certificate validation status (true or false) will not be considered when route matching. - google.protobuf.BoolValue validated = 2; - } - - // An extensible message for matching CONNECT requests. 
- message ConnectMatcher { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RouteMatch.ConnectMatcher"; - } - - reserved 5, 3; - - reserved "regex"; - - oneof path_specifier { - option (validate.required) = true; - - // If specified, the route is a prefix rule meaning that the prefix must - // match the beginning of the *:path* header. - string prefix = 1; - - // If specified, the route is an exact path rule meaning that the path must - // exactly match the *:path* header once the query string is removed. - string path = 2; - - // If specified, the route is a regular expression rule meaning that the - // regex must match the *:path* header once the query string is removed. The entire path - // (without the query string) must match the regex. The rule will not match if only a - // subsequence of the *:path* header matches the regex. - // - // [#next-major-version: In the v3 API we should redo how path specification works such - // that we utilize StringMatcher, and additionally have consistent options around whether we - // strip query strings, do a case sensitive match, etc. In the interim it will be too disruptive - // to deprecate the existing options. We should even consider whether we want to do away with - // path_specifier entirely and just rely on a set of header matchers which can already match - // on :path, etc. The issue with that is it is unclear how to generically deal with query string - // stripping. This needs more thought.] - type.matcher.v4alpha.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; - - // If this is used as the matcher, the matcher will only match CONNECT requests. - // Note that this will not match HTTP/2 upgrade-style CONNECT requests - // (WebSocket and the like) as they are normalized in Envoy as HTTP/1.1 style - // upgrades. - // This is the only way to match CONNECT requests for HTTP/1.1. 
For HTTP/2, - // where Extended CONNECT requests may have a path, the path matchers will work if - // there is a path present. - // Note that CONNECT support is currently considered alpha in Envoy. - // [#comment: TODO(htuch): Replace the above comment with an alpha tag.] - ConnectMatcher connect_matcher = 12; - } - - // Indicates that prefix/path matching should be case sensitive. The default - // is true. Ignored for safe_regex matching. - google.protobuf.BoolValue case_sensitive = 4; - - // Indicates that the route should additionally match on a runtime key. Every time the route - // is considered for a match, it must also fall under the percentage of matches indicated by - // this field. For some fraction N/D, a random number in the range [0,D) is selected. If the - // number is <= the value of the numerator N, or if the key is not present, the default - // value, the router continues to evaluate the remaining match criteria. A runtime_fraction - // route configuration can be used to roll out route changes in a gradual manner without full - // code/config deploys. Refer to the :ref:`traffic shifting - // ` docs for additional documentation. - // - // .. note:: - // - // Parsing this field is implemented such that the runtime key's data may be represented - // as a FractionalPercent proto represented as JSON/YAML and may also be represented as an - // integer with the assumption that the value is an integral percentage out of 100. For - // instance, a runtime key lookup returning the value "42" would parse as a FractionalPercent - // whose numerator is 42 and denominator is HUNDRED. This preserves legacy semantics. - core.v4alpha.RuntimeFractionalPercent runtime_fraction = 9; - - // Specifies a set of headers that the route should match on. The router will - // check the request’s headers against all the specified headers in the route - // config. 
A match will happen if all the headers in the route are present in - // the request with the same values (or based on presence if the value field - // is not in the config). - repeated HeaderMatcher headers = 6; - - // Specifies a set of URL query parameters on which the route should - // match. The router will check the query string from the *path* header - // against all the specified query parameters. If the number of specified - // query parameters is nonzero, they all must match the *path* header's - // query string for a match to occur. - repeated QueryParameterMatcher query_parameters = 7; - - // If specified, only gRPC requests will be matched. The router will check - // that the content-type header has a application/grpc or one of the various - // application/grpc+ values. - GrpcRouteMatchOptions grpc = 8; - - // If specified, the client tls context will be matched against the defined - // match options. - // - // [#next-major-version: unify with RBAC] - TlsContextMatchOptions tls_context = 11; -} - -// [#next-free-field: 12] -message CorsPolicy { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.CorsPolicy"; - - reserved 1, 8, 7; - - reserved "allow_origin", "allow_origin_regex", "enabled"; - - // Specifies string patterns that match allowed origins. An origin is allowed if any of the - // string matchers match. - repeated type.matcher.v4alpha.StringMatcher allow_origin_string_match = 11; - - // Specifies the content for the *access-control-allow-methods* header. - string allow_methods = 2; - - // Specifies the content for the *access-control-allow-headers* header. - string allow_headers = 3; - - // Specifies the content for the *access-control-expose-headers* header. - string expose_headers = 4; - - // Specifies the content for the *access-control-max-age* header. - string max_age = 5; - - // Specifies whether the resource allows credentials. 
- google.protobuf.BoolValue allow_credentials = 6; - - oneof enabled_specifier { - // Specifies the % of requests for which the CORS filter is enabled. - // - // If neither ``enabled``, ``filter_enabled``, nor ``shadow_enabled`` are specified, the CORS - // filter will be enabled for 100% of the requests. - // - // If :ref:`runtime_key ` is - // specified, Envoy will lookup the runtime key to get the percentage of requests to filter. - core.v4alpha.RuntimeFractionalPercent filter_enabled = 9; - } - - // Specifies the % of requests for which the CORS policies will be evaluated and tracked, but not - // enforced. - // - // This field is intended to be used when ``filter_enabled`` and ``enabled`` are off. One of those - // fields have to explicitly disable the filter in order for this setting to take effect. - // - // If :ref:`runtime_key ` is specified, - // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate - // and track the request's *Origin* to determine if it's valid but will not enforce any policies. - core.v4alpha.RuntimeFractionalPercent shadow_enabled = 10; -} - -// [#next-free-field: 38] -message RouteAction { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RouteAction"; - - enum ClusterNotFoundResponseCode { - // HTTP status code - 503 Service Unavailable. - SERVICE_UNAVAILABLE = 0; - - // HTTP status code - 404 Not Found. - NOT_FOUND = 1; - } - - // Configures :ref:`internal redirect ` behavior. - // [#next-major-version: remove this definition - it's defined in the InternalRedirectPolicy message.] - enum InternalRedirectAction { - option deprecated = true; - - PASS_THROUGH_INTERNAL_REDIRECT = 0; - HANDLE_INTERNAL_REDIRECT = 1; - } - - // The router is capable of shadowing traffic from one cluster to another. 
The current - // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to - // respond before returning the response from the primary cluster. All normal statistics are - // collected for the shadow cluster making this feature useful for testing. - // - // During shadowing, the host/authority header is altered such that *-shadow* is appended. This is - // useful for logging. For example, *cluster1* becomes *cluster1-shadow*. - // - // .. note:: - // - // Shadowing will not be triggered if the primary cluster does not exist. - message RequestMirrorPolicy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RouteAction.RequestMirrorPolicy"; - - reserved 2; - - reserved "runtime_key"; - - // Specifies the cluster that requests will be mirrored to. The cluster must - // exist in the cluster manager configuration. - string cluster = 1 [(validate.rules).string = {min_len: 1}]; - - // If not specified, all requests to the target cluster will be mirrored. - // - // If specified, this field takes precedence over the `runtime_key` field and requests must also - // fall under the percentage of matches indicated by this field. - // - // For some fraction N/D, a random number in the range [0,D) is selected. If the - // number is <= the value of the numerator N, or if the key is not present, the default - // value, the request will be mirrored. - core.v4alpha.RuntimeFractionalPercent runtime_fraction = 3; - - // Determines if the trace span should be sampled. Defaults to true. - google.protobuf.BoolValue trace_sampled = 4; - } - - // Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer - // `. 
- // [#next-free-field: 7] - message HashPolicy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RouteAction.HashPolicy"; - - message Header { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RouteAction.HashPolicy.Header"; - - // The name of the request header that will be used to obtain the hash - // key. If the request header is not present, no hash will be produced. - string header_name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // If specified, the request header value will be rewritten and used - // to produce the hash key. - type.matcher.v4alpha.RegexMatchAndSubstitute regex_rewrite = 2; - } - - // Envoy supports two types of cookie affinity: - // - // 1. Passive. Envoy takes a cookie that's present in the cookies header and - // hashes on its value. - // - // 2. Generated. Envoy generates and sets a cookie with an expiration (TTL) - // on the first request from the client in its response to the client, - // based on the endpoint the request gets sent to. The client then - // presents this on the next and all subsequent requests. The hash of - // this is sufficient to ensure these requests get sent to the same - // endpoint. The cookie is generated by hashing the source and - // destination ports and addresses so that multiple independent HTTP2 - // streams on the same connection will independently receive the same - // cookie, even if they arrive at the Envoy simultaneously. - message Cookie { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RouteAction.HashPolicy.Cookie"; - - // The name of the cookie that will be used to obtain the hash key. If the - // cookie is not present and ttl below is not set, no hash will be - // produced. 
- string name = 1 [(validate.rules).string = {min_len: 1}]; - - // If specified, a cookie with the TTL will be generated if the cookie is - // not present. If the TTL is present and zero, the generated cookie will - // be a session cookie. - google.protobuf.Duration ttl = 2; - - // The name of the path for the cookie. If no path is specified here, no path - // will be set for the cookie. - string path = 3; - } - - message ConnectionProperties { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RouteAction.HashPolicy.ConnectionProperties"; - - // Hash on source IP address. - bool source_ip = 1; - } - - message QueryParameter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RouteAction.HashPolicy.QueryParameter"; - - // The name of the URL query parameter that will be used to obtain the hash - // key. If the parameter is not present, no hash will be produced. Query - // parameter names are case-sensitive. - string name = 1 [(validate.rules).string = {min_len: 1}]; - } - - message FilterState { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RouteAction.HashPolicy.FilterState"; - - // The name of the Object in the per-request filterState, which is an - // Envoy::Http::Hashable object. If there is no data associated with the key, - // or the stored object is not Envoy::Http::Hashable, no hash will be produced. - string key = 1 [(validate.rules).string = {min_len: 1}]; - } - - oneof policy_specifier { - option (validate.required) = true; - - // Header hash policy. - Header header = 1; - - // Cookie hash policy. - Cookie cookie = 2; - - // Connection properties hash policy. - ConnectionProperties connection_properties = 3; - - // Query parameter hash policy. - QueryParameter query_parameter = 5; - - // Filter state hash policy. - FilterState filter_state = 6; - } - - // The flag that short-circuits the hash computing. 
This field provides a - // 'fallback' style of configuration: "if a terminal policy doesn't work, - // fallback to rest of the policy list", it saves time when the terminal - // policy works. - // - // If true, and there is already a hash computed, ignore rest of the - // list of hash polices. - // For example, if the following hash methods are configured: - // - // ========= ======== - // specifier terminal - // ========= ======== - // Header A true - // Header B false - // Header C false - // ========= ======== - // - // The generateHash process ends if policy "header A" generates a hash, as - // it's a terminal policy. - bool terminal = 4; - } - - // Allows enabling and disabling upgrades on a per-route basis. - // This overrides any enabled/disabled upgrade filter chain specified in the - // HttpConnectionManager - // :ref:`upgrade_configs - // ` - // but does not affect any custom filter chain specified there. - message UpgradeConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RouteAction.UpgradeConfig"; - - // Configuration for sending data upstream as a raw data payload. This is used for - // CONNECT or POST requests, when forwarding request payload as raw TCP. - message ConnectConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RouteAction.UpgradeConfig.ConnectConfig"; - - // If present, the proxy protocol header will be prepended to the CONNECT payload sent upstream. - core.v4alpha.ProxyProtocolConfig proxy_protocol_config = 1; - - // If set, the route will also allow forwarding POST payload as raw TCP. - bool allow_post = 2; - } - - // The case-insensitive name of this upgrade, e.g. "websocket". - // For each upgrade type present in upgrade_configs, requests with - // Upgrade: [upgrade_type] will be proxied upstream. 
- string upgrade_type = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Determines if upgrades are available on this route. Defaults to true. - google.protobuf.BoolValue enabled = 2; - - // Configuration for sending data upstream as a raw data payload. This is used for - // CONNECT requests, when forwarding CONNECT payload as raw TCP. - // Note that CONNECT support is currently considered alpha in Envoy. - // [#comment: TODO(htuch): Replace the above comment with an alpha tag.] - ConnectConfig connect_config = 3; - } - - message MaxStreamDuration { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RouteAction.MaxStreamDuration"; - - // Specifies the maximum duration allowed for streams on the route. If not specified, the value - // from the :ref:`max_stream_duration - // ` field in - // :ref:`HttpConnectionManager.common_http_protocol_options - // ` - // is used. If this field is set explicitly to zero, any - // HttpConnectionManager max_stream_duration timeout will be disabled for - // this route. - google.protobuf.Duration max_stream_duration = 1; - - // If present, and the request contains a `grpc-timeout header - // `_, use that value as the - // *max_stream_duration*, but limit the applied timeout to the maximum value specified here. - // If set to 0, the `grpc-timeout` header is used without modification. - google.protobuf.Duration grpc_timeout_header_max = 2; - - // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by - // subtracting the provided duration from the header. This is useful for allowing Envoy to set - // its global timeout to be less than that of the deadline imposed by the calling client, which - // makes it more likely that Envoy will handle the timeout instead of having the call canceled - // by the client. If, after applying the offset, the resulting timeout is zero or negative, - // the stream will timeout immediately. 
- google.protobuf.Duration grpc_timeout_header_offset = 3; - } - - reserved 12, 18, 19, 16, 22, 21, 10; - - reserved "request_mirror_policy"; - - oneof cluster_specifier { - option (validate.required) = true; - - // Indicates the upstream cluster to which the request should be routed - // to. - string cluster = 1 [(validate.rules).string = {min_len: 1}]; - - // Envoy will determine the cluster to route to by reading the value of the - // HTTP header named by cluster_header from the request headers. If the - // header is not found or the referenced cluster does not exist, Envoy will - // return a 404 response. - // - // .. attention:: - // - // Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 - // *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. - string cluster_header = 2 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. See - // :ref:`traffic splitting ` - // for additional documentation. - WeightedCluster weighted_clusters = 3; - - // [#not-implemented-hide:] - // Name of the cluster specifier plugin to use to determine the cluster for - // requests on this route. The plugin name must be defined in the associated - // :ref:`envoy_v3_api_field_config.route.v3.RouteConfiguration.cluster_specifier_plugins` - // in the - // :ref:`envoy_v3_api_field_config.core.v3.TypedExtensionConfig.name` field. - string cluster_specifier_plugin = 37; - } - - // The HTTP status code to use when configured cluster is not found. - // The default response code is 503 Service Unavailable. 
- ClusterNotFoundResponseCode cluster_not_found_response_code = 20 - [(validate.rules).enum = {defined_only: true}]; - - // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints - // in the upstream cluster with metadata matching what's set in this field will be considered - // for load balancing. If using :ref:`weighted_clusters - // `, metadata will be merged, with values - // provided there taking precedence. The filter name should be specified as *envoy.lb*. - core.v4alpha.Metadata metadata_match = 4; - - // Indicates that during forwarding, the matched prefix (or path) should be - // swapped with this value. This option allows application URLs to be rooted - // at a different path from those exposed at the reverse proxy layer. The router filter will - // place the original path before rewrite into the :ref:`x-envoy-original-path - // ` header. - // - // Only one of *prefix_rewrite* or - // :ref:`regex_rewrite ` - // may be specified. - // - // .. attention:: - // - // Pay careful attention to the use of trailing slashes in the - // :ref:`route's match ` prefix value. - // Stripping a prefix from a path requires multiple Routes to handle all cases. For example, - // rewriting */prefix* to */* and */prefix/etc* to */etc* cannot be done in a single - // :ref:`Route `, as shown by the below config entries: - // - // .. code-block:: yaml - // - // - match: - // prefix: "/prefix/" - // route: - // prefix_rewrite: "/" - // - match: - // prefix: "/prefix" - // route: - // prefix_rewrite: "/" - // - // Having above entries in the config, requests to */prefix* will be stripped to */*, while - // requests to */prefix/etc* will be stripped to */etc*. 
- string prefix_rewrite = 5 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Indicates that during forwarding, portions of the path that match the - // pattern should be rewritten, even allowing the substitution of capture - // groups from the pattern into the new path as specified by the rewrite - // substitution string. This is useful to allow application paths to be - // rewritten in a way that is aware of segments with variable content like - // identifiers. The router filter will place the original path as it was - // before the rewrite into the :ref:`x-envoy-original-path - // ` header. - // - // Only one of :ref:`prefix_rewrite ` - // or *regex_rewrite* may be specified. - // - // Examples using Google's `RE2 `_ engine: - // - // * The path pattern ``^/service/([^/]+)(/.*)$`` paired with a substitution - // string of ``\2/instance/\1`` would transform ``/service/foo/v1/api`` - // into ``/v1/api/instance/foo``. - // - // * The pattern ``one`` paired with a substitution string of ``two`` would - // transform ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/two/zzz``. - // - // * The pattern ``^(.*?)one(.*)$`` paired with a substitution string of - // ``\1two\2`` would replace only the first occurrence of ``one``, - // transforming path ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/one/zzz``. - // - // * The pattern ``(?i)/xxx/`` paired with a substitution string of ``/yyy/`` - // would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to - // ``/aaa/yyy/bbb``. - type.matcher.v4alpha.RegexMatchAndSubstitute regex_rewrite = 32; - - oneof host_rewrite_specifier { - // Indicates that during forwarding, the host header will be swapped with - // this value. - string host_rewrite_literal = 6 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Indicates that during forwarding, the host header will be swapped with - // the hostname of the upstream host chosen by the cluster manager. 
This - // option is applicable only when the destination cluster for a route is of - // type *strict_dns* or *logical_dns*. Setting this to true with other cluster - // types has no effect. - google.protobuf.BoolValue auto_host_rewrite = 7; - - // Indicates that during forwarding, the host header will be swapped with the content of given - // downstream or :ref:`custom ` header. - // If header value is empty, host header is left intact. - // - // .. attention:: - // - // Pay attention to the potential security implications of using this option. Provided header - // must come from trusted source. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. - string host_rewrite_header = 29 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // Indicates that during forwarding, the host header will be swapped with - // the result of the regex substitution executed on path value with query and fragment removed. - // This is useful for transitioning variable content between path segment and subdomain. - // - // For example with the following config: - // - // .. code-block:: yaml - // - // host_rewrite_path_regex: - // pattern: - // google_re2: {} - // regex: "^/(.+)/.+$" - // substitution: \1 - // - // Would rewrite the host header to `envoyproxy.io` given the path `/envoyproxy.io/some/path`. - type.matcher.v4alpha.RegexMatchAndSubstitute host_rewrite_path_regex = 35; - } - - // Specifies the upstream timeout for the route. If not specified, the default is 15s. This - // spans between the point at which the entire downstream request (i.e. end-of-stream) has been - // processed and when the upstream response has been completely processed. A value of 0 will - // disable the route's timeout. - // - // .. note:: - // - // This timeout includes all retries. 
See also - // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, - // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the - // :ref:`retry overview `. - google.protobuf.Duration timeout = 8; - - // Specifies the idle timeout for the route. If not specified, there is no per-route idle timeout, - // although the connection manager wide :ref:`stream_idle_timeout - // ` - // will still apply. A value of 0 will completely disable the route's idle timeout, even if a - // connection manager stream idle timeout is configured. - // - // The idle timeout is distinct to :ref:`timeout - // `, which provides an upper bound - // on the upstream response time; :ref:`idle_timeout - // ` instead bounds the amount - // of time the request's stream may be idle. - // - // After header decoding, the idle timeout will apply on downstream and - // upstream request events. Each time an encode/decode event for headers or - // data is processed for the stream, the timer will be reset. If the timeout - // fires, the stream is terminated with a 408 Request Timeout error code if no - // upstream response header has been received, otherwise a stream reset - // occurs. - // - // If the :ref:`overload action ` "envoy.overload_actions.reduce_timeouts" - // is configured, this timeout is scaled according to the value for - // :ref:`HTTP_DOWNSTREAM_STREAM_IDLE `. - google.protobuf.Duration idle_timeout = 24; - - // Indicates that the route has a retry policy. Note that if this is set, - // it'll take precedence over the virtual host level retry policy entirely - // (e.g.: policies are not merged, most internal one becomes the enforced policy). - RetryPolicy retry_policy = 9; - - // [#not-implemented-hide:] - // Specifies the configuration for retry policy extension. Note that if this is set, it'll take - // precedence over the virtual host level retry policy entirely (e.g.: policies are not merged, - // most internal one becomes the enforced policy). 
:ref:`Retry policy ` - // should not be set if this field is used. - google.protobuf.Any retry_policy_typed_config = 33; - - // Indicates that the route has request mirroring policies. - repeated RequestMirrorPolicy request_mirror_policies = 30; - - // Optionally specifies the :ref:`routing priority `. - core.v4alpha.RoutingPriority priority = 11 [(validate.rules).enum = {defined_only: true}]; - - // Specifies a set of rate limit configurations that could be applied to the - // route. - repeated RateLimit rate_limits = 13; - - // Specifies if the rate limit filter should include the virtual host rate - // limits. By default, if the route configured rate limits, the virtual host - // :ref:`rate_limits ` are not applied to the - // request. - // - // This field is deprecated. Please use :ref:`vh_rate_limits ` - google.protobuf.BoolValue hidden_envoy_deprecated_include_vh_rate_limits = 14 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Specifies a list of hash policies to use for ring hash load balancing. Each - // hash policy is evaluated individually and the combined result is used to - // route the request. The method of combination is deterministic such that - // identical lists of hash policies will produce the same hash. Since a hash - // policy examines specific parts of a request, it can fail to produce a hash - // (i.e. if the hashed header is not present). If (and only if) all configured - // hash policies fail to generate a hash, no hash will be produced for - // the route. In this case, the behavior is the same as if no hash policies - // were specified (i.e. the ring hash load balancer will choose a random - // backend). If a hash policy has the "terminal" attribute set to true, and - // there is already a hash generated, the hash is returned immediately, - // ignoring the rest of the hash policy list. - repeated HashPolicy hash_policy = 15; - - // Indicates that the route has a CORS policy. 
- CorsPolicy cors = 17; - - // Deprecated by :ref:`grpc_timeout_header_max ` - // If present, and the request is a gRPC request, use the - // `grpc-timeout header `_, - // or its default value (infinity) instead of - // :ref:`timeout `, but limit the applied timeout - // to the maximum value specified here. If configured as 0, the maximum allowed timeout for - // gRPC requests is infinity. If not configured at all, the `grpc-timeout` header is not used - // and gRPC requests time out like any other requests using - // :ref:`timeout ` or its default. - // This can be used to prevent unexpected upstream request timeouts due to potentially long - // time gaps between gRPC request and response in gRPC streaming mode. - // - // .. note:: - // - // If a timeout is specified using :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, it takes - // precedence over `grpc-timeout header `_, when - // both are present. See also - // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, - // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the - // :ref:`retry overview `. - google.protobuf.Duration hidden_envoy_deprecated_max_grpc_timeout = 23 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Deprecated by :ref:`grpc_timeout_header_offset `. - // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by subtracting - // the provided duration from the header. This is useful in allowing Envoy to set its global - // timeout to be less than that of the deadline imposed by the calling client, which makes it more - // likely that Envoy will handle the timeout instead of having the call canceled by the client. - // The offset will only be applied if the provided grpc_timeout is greater than the offset. This - // ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning - // infinity). 
- google.protobuf.Duration hidden_envoy_deprecated_grpc_timeout_offset = 28 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - repeated UpgradeConfig upgrade_configs = 25; - - // If present, Envoy will try to follow an upstream redirect response instead of proxying the - // response back to the downstream. An upstream redirect response is defined - // by :ref:`redirect_response_codes - // `. - InternalRedirectPolicy internal_redirect_policy = 34; - - InternalRedirectAction hidden_envoy_deprecated_internal_redirect_action = 26 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // An internal redirect is handled, iff the number of previous internal redirects that a - // downstream request has encountered is lower than this value, and - // :ref:`internal_redirect_action ` - // is set to :ref:`HANDLE_INTERNAL_REDIRECT - // ` - // In the case where a downstream request is bounced among multiple routes by internal redirect, - // the first route that hits this threshold, or has - // :ref:`internal_redirect_action ` - // set to - // :ref:`PASS_THROUGH_INTERNAL_REDIRECT - // ` - // will pass the redirect back to downstream. - // - // If not specified, at most one redirect will be followed. - google.protobuf.UInt32Value hidden_envoy_deprecated_max_internal_redirects = 31 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Indicates that the route has a hedge policy. Note that if this is set, - // it'll take precedence over the virtual host level hedge policy entirely - // (e.g.: policies are not merged, most internal one becomes the enforced policy). - HedgePolicy hedge_policy = 27; - - // Specifies the maximum stream duration for this route. - MaxStreamDuration max_stream_duration = 36; -} - -// HTTP retry :ref:`architecture overview `. 
-// [#next-free-field: 12] -message RetryPolicy { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RetryPolicy"; - - enum ResetHeaderFormat { - SECONDS = 0; - UNIX_TIMESTAMP = 1; - } - - message RetryPriority { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RetryPolicy.RetryPriority"; - - reserved 2; - - reserved "config"; - - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // [#extension-category: envoy.retry_priorities] - oneof config_type { - google.protobuf.Any typed_config = 3; - } - } - - message RetryHostPredicate { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RetryPolicy.RetryHostPredicate"; - - reserved 2; - - reserved "config"; - - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // [#extension-category: envoy.retry_host_predicates] - oneof config_type { - google.protobuf.Any typed_config = 3; - } - } - - message RetryBackOff { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RetryPolicy.RetryBackOff"; - - // Specifies the base interval between retries. This parameter is required and must be greater - // than zero. Values less than 1 ms are rounded up to 1 ms. - // See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's - // back-off algorithm. - google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { - required: true - gt {} - }]; - - // Specifies the maximum interval between retries. This parameter is optional, but must be - // greater than or equal to the `base_interval` if set. The default is 10 times the - // `base_interval`. See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion - // of Envoy's back-off algorithm. 
- google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}]; - } - - message ResetHeader { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RetryPolicy.ResetHeader"; - - // The name of the reset header. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. - string name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // The format of the reset header. - ResetHeaderFormat format = 2 [(validate.rules).enum = {defined_only: true}]; - } - - // A retry back-off strategy that applies when the upstream server rate limits - // the request. - // - // Given this configuration: - // - // .. code-block:: yaml - // - // rate_limited_retry_back_off: - // reset_headers: - // - name: Retry-After - // format: SECONDS - // - name: X-RateLimit-Reset - // format: UNIX_TIMESTAMP - // max_interval: "300s" - // - // The following algorithm will apply: - // - // 1. If the response contains the header ``Retry-After`` its value must be on - // the form ``120`` (an integer that represents the number of seconds to - // wait before retrying). If so, this value is used as the back-off interval. - // 2. Otherwise, if the response contains the header ``X-RateLimit-Reset`` its - // value must be on the form ``1595320702`` (an integer that represents the - // point in time at which to retry, as a Unix timestamp in seconds). If so, - // the current time is subtracted from this value and the result is used as - // the back-off interval. - // 3. Otherwise, Envoy will use the default - // :ref:`exponential back-off ` - // strategy. - // - // No matter which format is used, if the resulting back-off interval exceeds - // ``max_interval`` it is discarded and the next header in ``reset_headers`` - // is tried. If a request timeout is configured for the route it will further - // limit how long the request will be allowed to run. 
- // - // To prevent many clients retrying at the same point in time jitter is added - // to the back-off interval, so the resulting interval is decided by taking: - // ``random(interval, interval * 1.5)``. - // - // .. attention:: - // - // Configuring ``rate_limited_retry_back_off`` will not by itself cause a request - // to be retried. You will still need to configure the right retry policy to match - // the responses from the upstream server. - message RateLimitedRetryBackOff { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RetryPolicy.RateLimitedRetryBackOff"; - - // Specifies the reset headers (like ``Retry-After`` or ``X-RateLimit-Reset``) - // to match against the response. Headers are tried in order, and matched case - // insensitive. The first header to be parsed successfully is used. If no headers - // match the default exponential back-off is used instead. - repeated ResetHeader reset_headers = 1 [(validate.rules).repeated = {min_items: 1}]; - - // Specifies the maximum back off interval that Envoy will allow. If a reset - // header contains an interval longer than this then it will be discarded and - // the next header will be tried. Defaults to 300 seconds. - google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}]; - } - - // Specifies the conditions under which retry takes place. These are the same - // conditions documented for :ref:`config_http_filters_router_x-envoy-retry-on` and - // :ref:`config_http_filters_router_x-envoy-retry-grpc-on`. - string retry_on = 1; - - // Specifies the allowed number of retries. This parameter is optional and - // defaults to 1. These are the same conditions documented for - // :ref:`config_http_filters_router_x-envoy-max-retries`. - google.protobuf.UInt32Value max_retries = 2; - - // Specifies a non-zero upstream timeout per retry attempt. This parameter is optional. 
The - // same conditions documented for - // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` apply. - // - // .. note:: - // - // If left unspecified, Envoy will use the global - // :ref:`route timeout ` for the request. - // Consequently, when using a :ref:`5xx ` based - // retry policy, a request that times out will not be retried as the total timeout budget - // would have been exhausted. - google.protobuf.Duration per_try_timeout = 3; - - // Specifies an implementation of a RetryPriority which is used to determine the - // distribution of load across priorities used for retries. Refer to - // :ref:`retry plugin configuration ` for more details. - RetryPriority retry_priority = 4; - - // Specifies a collection of RetryHostPredicates that will be consulted when selecting a host - // for retries. If any of the predicates reject the host, host selection will be reattempted. - // Refer to :ref:`retry plugin configuration ` for more - // details. - repeated RetryHostPredicate retry_host_predicate = 5; - - // The maximum number of times host selection will be reattempted before giving up, at which - // point the host that was last selected will be routed to. If unspecified, this will default to - // retrying once. - int64 host_selection_retry_max_attempts = 6; - - // HTTP status codes that should trigger a retry in addition to those specified by retry_on. - repeated uint32 retriable_status_codes = 7; - - // Specifies parameters that control exponential retry back off. This parameter is optional, in which case the - // default base interval is 25 milliseconds or, if set, the current value of the - // `upstream.base_retry_backoff_ms` runtime parameter. The default maximum interval is 10 times - // the base interval. The documentation for :ref:`config_http_filters_router_x-envoy-max-retries` - // describes Envoy's back-off algorithm. 
- RetryBackOff retry_back_off = 8; - - // Specifies parameters that control a retry back-off strategy that is used - // when the request is rate limited by the upstream server. The server may - // return a response header like ``Retry-After`` or ``X-RateLimit-Reset`` to - // provide feedback to the client on how long to wait before retrying. If - // configured, this back-off strategy will be used instead of the - // default exponential back off strategy (configured using `retry_back_off`) - // whenever a response includes the matching headers. - RateLimitedRetryBackOff rate_limited_retry_back_off = 11; - - // HTTP response headers that trigger a retry if present in the response. A retry will be - // triggered if any of the header matches match the upstream response headers. - // The field is only consulted if 'retriable-headers' retry policy is active. - repeated HeaderMatcher retriable_headers = 9; - - // HTTP headers which must be present in the request for retries to be attempted. - repeated HeaderMatcher retriable_request_headers = 10; -} - -// HTTP request hedging :ref:`architecture overview `. -message HedgePolicy { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.HedgePolicy"; - - // Specifies the number of initial requests that should be sent upstream. - // Must be at least 1. - // Defaults to 1. - // [#not-implemented-hide:] - google.protobuf.UInt32Value initial_requests = 1 [(validate.rules).uint32 = {gte: 1}]; - - // Specifies a probability that an additional upstream request should be sent - // on top of what is specified by initial_requests. - // Defaults to 0. - // [#not-implemented-hide:] - type.v3.FractionalPercent additional_request_chance = 2; - - // Indicates that a hedged request should be sent when the per-try timeout is hit. - // This means that a retry will be issued without resetting the original request, leaving multiple upstream requests in flight. 
- // The first request to complete successfully will be the one returned to the caller. - // - // * At any time, a successful response (i.e. not triggering any of the retry-on conditions) would be returned to the client. - // * Before per-try timeout, an error response (per retry-on conditions) would be retried immediately or returned ot the client - // if there are no more retries left. - // * After per-try timeout, an error response would be discarded, as a retry in the form of a hedged request is already in progress. - // - // Note: For this to have effect, you must have a :ref:`RetryPolicy ` that retries at least - // one error code and specifies a maximum number of retries. - // - // Defaults to false. - bool hedge_on_per_try_timeout = 3; -} - -// [#next-free-field: 10] -message RedirectAction { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RedirectAction"; - - enum RedirectResponseCode { - // Moved Permanently HTTP Status Code - 301. - MOVED_PERMANENTLY = 0; - - // Found HTTP Status Code - 302. - FOUND = 1; - - // See Other HTTP Status Code - 303. - SEE_OTHER = 2; - - // Temporary Redirect HTTP Status Code - 307. - TEMPORARY_REDIRECT = 3; - - // Permanent Redirect HTTP Status Code - 308. - PERMANENT_REDIRECT = 4; - } - - // When the scheme redirection take place, the following rules apply: - // 1. If the source URI scheme is `http` and the port is explicitly - // set to `:80`, the port will be removed after the redirection - // 2. If the source URI scheme is `https` and the port is explicitly - // set to `:443`, the port will be removed after the redirection - oneof scheme_rewrite_specifier { - // The scheme portion of the URL will be swapped with "https". - bool https_redirect = 4; - - // The scheme portion of the URL will be swapped with this value. - string scheme_redirect = 7; - } - - // The host portion of the URL will be swapped with this value. 
- string host_redirect = 1 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // The port value of the URL will be swapped with this value. - uint32 port_redirect = 8; - - oneof path_rewrite_specifier { - // The path portion of the URL will be swapped with this value. - // Please note that query string in path_redirect will override the - // request's query string and will not be stripped. - // - // For example, let's say we have the following routes: - // - // - match: { path: "/old-path-1" } - // redirect: { path_redirect: "/new-path-1" } - // - match: { path: "/old-path-2" } - // redirect: { path_redirect: "/new-path-2", strip-query: "true" } - // - match: { path: "/old-path-3" } - // redirect: { path_redirect: "/new-path-3?foo=1", strip_query: "true" } - // - // 1. if request uri is "/old-path-1?bar=1", users will be redirected to "/new-path-1?bar=1" - // 2. if request uri is "/old-path-2?bar=1", users will be redirected to "/new-path-2" - // 3. if request uri is "/old-path-3?bar=1", users will be redirected to "/new-path-3?foo=1" - string path_redirect = 2 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Indicates that during redirection, the matched prefix (or path) - // should be swapped with this value. This option allows redirect URLs be dynamically created - // based on the request. - // - // .. attention:: - // - // Pay attention to the use of trailing slashes as mentioned in - // :ref:`RouteAction's prefix_rewrite `. - string prefix_rewrite = 5 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Indicates that during redirect, portions of the path that match the - // pattern should be rewritten, even allowing the substitution of capture - // groups from the pattern into the new path as specified by the rewrite - // substitution string. 
This is useful to allow application paths to be - // rewritten in a way that is aware of segments with variable content like - // identifiers. - // - // Examples using Google's `RE2 `_ engine: - // - // * The path pattern ``^/service/([^/]+)(/.*)$`` paired with a substitution - // string of ``\2/instance/\1`` would transform ``/service/foo/v1/api`` - // into ``/v1/api/instance/foo``. - // - // * The pattern ``one`` paired with a substitution string of ``two`` would - // transform ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/two/zzz``. - // - // * The pattern ``^(.*?)one(.*)$`` paired with a substitution string of - // ``\1two\2`` would replace only the first occurrence of ``one``, - // transforming path ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/one/zzz``. - // - // * The pattern ``(?i)/xxx/`` paired with a substitution string of ``/yyy/`` - // would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to - // ``/aaa/yyy/bbb``. - type.matcher.v4alpha.RegexMatchAndSubstitute regex_rewrite = 9; - } - - // The HTTP status code to use in the redirect response. The default response - // code is MOVED_PERMANENTLY (301). - RedirectResponseCode response_code = 3 [(validate.rules).enum = {defined_only: true}]; - - // Indicates that during redirection, the query portion of the URL will - // be removed. Default value is false. - bool strip_query = 6; -} - -message DirectResponseAction { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.DirectResponseAction"; - - // Specifies the HTTP response status to be returned. - uint32 status = 1 [(validate.rules).uint32 = {lt: 600 gte: 100}]; - - // Specifies the content of the response body. If this setting is omitted, - // no body is included in the generated response. - // - // .. 
note:: - // - // Headers can be specified using *response_headers_to_add* in the enclosing - // :ref:`envoy_v3_api_msg_config.route.v3.Route`, :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` or - // :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`. - core.v4alpha.DataSource body = 2; -} - -// [#not-implemented-hide:] -message NonForwardingAction { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.NonForwardingAction"; -} - -message Decorator { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.Decorator"; - - // The operation name associated with the request matched to this route. If tracing is - // enabled, this information will be used as the span name reported for this request. - // - // .. note:: - // - // For ingress (inbound) requests, or egress (outbound) responses, this value may be overridden - // by the :ref:`x-envoy-decorator-operation - // ` header. - string operation = 1 [(validate.rules).string = {min_len: 1}]; - - // Whether the decorated details should be propagated to the other party. The default is true. - google.protobuf.BoolValue propagate = 2; -} - -message Tracing { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.Tracing"; - - // Target percentage of requests managed by this HTTP connection manager that will be force - // traced if the :ref:`x-client-trace-id ` - // header is set. This field is a direct analog for the runtime variable - // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager - // `. - // Default: 100% - type.v3.FractionalPercent client_sampling = 1; - - // Target percentage of requests managed by this HTTP connection manager that will be randomly - // selected for trace generation, if not requested by the client or not forced. This field is - // a direct analog for the runtime variable 'tracing.random_sampling' in the - // :ref:`HTTP Connection Manager `. 
- // Default: 100% - type.v3.FractionalPercent random_sampling = 2; - - // Target percentage of requests managed by this HTTP connection manager that will be traced - // after all other sampling checks have been applied (client-directed, force tracing, random - // sampling). This field functions as an upper limit on the total configured sampling rate. For - // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1% - // of client requests with the appropriate headers to be force traced. This field is a direct - // analog for the runtime variable 'tracing.global_enabled' in the - // :ref:`HTTP Connection Manager `. - // Default: 100% - type.v3.FractionalPercent overall_sampling = 3; - - // A list of custom tags with unique tag name to create tags for the active span. - // It will take effect after merging with the :ref:`corresponding configuration - // ` - // configured in the HTTP connection manager. If two tags with the same name are configured - // each in the HTTP connection manager and the route level, the one configured here takes - // priority. - repeated type.tracing.v3.CustomTag custom_tags = 4; -} - -// A virtual cluster is a way of specifying a regex matching rule against -// certain important endpoints such that statistics are generated explicitly for -// the matched requests. The reason this is useful is that when doing -// prefix/path matching Envoy does not always know what the application -// considers to be an endpoint. Thus, it’s impossible for Envoy to generically -// emit per endpoint statistics. However, often systems have highly critical -// endpoints that they wish to get “perfect” statistics on. Virtual cluster -// statistics are perfect in the sense that they are emitted on the downstream -// side such that they include network level failures. -// -// Documentation for :ref:`virtual cluster statistics `. -// -// .. 
note:: -// -// Virtual clusters are a useful tool, but we do not recommend setting up a virtual cluster for -// every application endpoint. This is both not easily maintainable and as well the matching and -// statistics output are not free. -message VirtualCluster { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.VirtualCluster"; - - reserved 1, 3; - - reserved "pattern", "method"; - - // Specifies a list of header matchers to use for matching requests. Each specified header must - // match. The pseudo-headers `:path` and `:method` can be used to match the request path and - // method, respectively. - repeated HeaderMatcher headers = 4; - - // Specifies the name of the virtual cluster. The virtual cluster name as well - // as the virtual host name are used when emitting statistics. The statistics are emitted by the - // router filter and are documented :ref:`here `. - string name = 2 [(validate.rules).string = {min_len: 1}]; -} - -// Global rate limiting :ref:`architecture overview `. -// Also applies to Local rate limiting :ref:`using descriptors `. -message RateLimit { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RateLimit"; - - // [#next-free-field: 10] - message Action { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RateLimit.Action"; - - // The following descriptor entry is appended to the descriptor: - // - // .. code-block:: cpp - // - // ("source_cluster", "") - // - // is derived from the :option:`--service-cluster` option. - message SourceCluster { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RateLimit.Action.SourceCluster"; - } - - // The following descriptor entry is appended to the descriptor: - // - // .. 
code-block:: cpp - // - // ("destination_cluster", "") - // - // Once a request matches against a route table rule, a routed cluster is determined by one of - // the following :ref:`route table configuration ` - // settings: - // - // * :ref:`cluster ` indicates the upstream cluster - // to route to. - // * :ref:`weighted_clusters ` - // chooses a cluster randomly from a set of clusters with attributed weight. - // * :ref:`cluster_header ` indicates which - // header in the request contains the target cluster. - message DestinationCluster { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RateLimit.Action.DestinationCluster"; - } - - // The following descriptor entry is appended when a header contains a key that matches the - // *header_name*: - // - // .. code-block:: cpp - // - // ("", "") - message RequestHeaders { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RateLimit.Action.RequestHeaders"; - - // The header name to be queried from the request headers. The header’s - // value is used to populate the value of the descriptor entry for the - // descriptor_key. - string header_name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // The key to use in the descriptor entry. - string descriptor_key = 2 [(validate.rules).string = {min_len: 1}]; - - // If set to true, Envoy skips the descriptor while calling rate limiting service - // when header is not present in the request. By default it skips calling the - // rate limiting service if this header is not present in the request. - bool skip_if_absent = 3; - } - - // The following descriptor entry is appended to the descriptor and is populated using the - // trusted address from :ref:`x-forwarded-for `: - // - // .. 
code-block:: cpp - // - // ("remote_address", "") - message RemoteAddress { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RateLimit.Action.RemoteAddress"; - } - - // The following descriptor entry is appended to the descriptor: - // - // .. code-block:: cpp - // - // ("generic_key", "") - message GenericKey { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RateLimit.Action.GenericKey"; - - // The value to use in the descriptor entry. - string descriptor_value = 1 [(validate.rules).string = {min_len: 1}]; - - // An optional key to use in the descriptor entry. If not set it defaults - // to 'generic_key' as the descriptor key. - string descriptor_key = 2; - } - - // The following descriptor entry is appended to the descriptor: - // - // .. code-block:: cpp - // - // ("header_match", "") - message HeaderValueMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RateLimit.Action.HeaderValueMatch"; - - // The value to use in the descriptor entry. - string descriptor_value = 1 [(validate.rules).string = {min_len: 1}]; - - // If set to true, the action will append a descriptor entry when the - // request matches the headers. If set to false, the action will append a - // descriptor entry when the request does not match the headers. The - // default value is true. - google.protobuf.BoolValue expect_match = 2; - - // Specifies a set of headers that the rate limit action should match - // on. The action will check the request’s headers against all the - // specified headers in the config. A match will happen if all the - // headers in the config are present in the request with the same values - // (or based on presence if the value field is not in the config). 
- repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}]; - } - - // The following descriptor entry is appended when the - // :ref:`dynamic metadata ` contains a key value: - // - // .. code-block:: cpp - // - // ("", "") - // - // .. attention:: - // This action has been deprecated in favor of the :ref:`metadata ` action - message DynamicMetaData { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RateLimit.Action.DynamicMetaData"; - - // The key to use in the descriptor entry. - string descriptor_key = 1 [(validate.rules).string = {min_len: 1}]; - - // Metadata struct that defines the key and path to retrieve the string value. A match will - // only happen if the value in the dynamic metadata is of type string. - type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}]; - - // An optional value to use if *metadata_key* is empty. If not set and - // no value is present under the metadata_key then no descriptor is generated. - string default_value = 3; - } - - // The following descriptor entry is appended when the metadata contains a key value: - // - // .. code-block:: cpp - // - // ("", "") - message MetaData { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RateLimit.Action.MetaData"; - - enum Source { - // Query :ref:`dynamic metadata ` - DYNAMIC = 0; - - // Query :ref:`route entry metadata ` - ROUTE_ENTRY = 1; - } - - // The key to use in the descriptor entry. - string descriptor_key = 1 [(validate.rules).string = {min_len: 1}]; - - // Metadata struct that defines the key and path to retrieve the string value. A match will - // only happen if the value in the metadata is of type string. - type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}]; - - // An optional value to use if *metadata_key* is empty. 
If not set and - // no value is present under the metadata_key then no descriptor is generated. - string default_value = 3; - - // Source of metadata - Source source = 4 [(validate.rules).enum = {defined_only: true}]; - } - - oneof action_specifier { - option (validate.required) = true; - - // Rate limit on source cluster. - SourceCluster source_cluster = 1; - - // Rate limit on destination cluster. - DestinationCluster destination_cluster = 2; - - // Rate limit on request headers. - RequestHeaders request_headers = 3; - - // Rate limit on remote address. - RemoteAddress remote_address = 4; - - // Rate limit on a generic key. - GenericKey generic_key = 5; - - // Rate limit on the existence of request headers. - HeaderValueMatch header_value_match = 6; - - // Rate limit on dynamic metadata. - // - // .. attention:: - // This field has been deprecated in favor of the :ref:`metadata ` field - DynamicMetaData hidden_envoy_deprecated_dynamic_metadata = 7 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; - - // Rate limit on metadata. - MetaData metadata = 8; - - // Rate limit descriptor extension. See the rate limit descriptor extensions documentation. - // [#extension-category: envoy.rate_limit_descriptors] - core.v4alpha.TypedExtensionConfig extension = 9; - } - } - - message Override { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RateLimit.Override"; - - // Fetches the override from the dynamic metadata. - message DynamicMetadata { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.RateLimit.Override.DynamicMetadata"; - - // Metadata struct that defines the key and path to retrieve the struct value. 
- // The value must be a struct containing an integer "requests_per_unit" property - // and a "unit" property with a value parseable to :ref:`RateLimitUnit - // enum ` - type.metadata.v3.MetadataKey metadata_key = 1 [(validate.rules).message = {required: true}]; - } - - oneof override_specifier { - option (validate.required) = true; - - // Limit override from dynamic metadata. - DynamicMetadata dynamic_metadata = 1; - } - } - - // Refers to the stage set in the filter. The rate limit configuration only - // applies to filters with the same stage number. The default stage number is - // 0. - // - // .. note:: - // - // The filter supports a range of 0 - 10 inclusively for stage numbers. - google.protobuf.UInt32Value stage = 1 [(validate.rules).uint32 = {lte: 10}]; - - // The key to be set in runtime to disable this rate limit configuration. - string disable_key = 2; - - // A list of actions that are to be applied for this rate limit configuration. - // Order matters as the actions are processed sequentially and the descriptor - // is composed by appending descriptor entries in that sequence. If an action - // cannot append a descriptor entry, no descriptor is generated for the - // configuration. See :ref:`composing actions - // ` for additional documentation. - repeated Action actions = 3 [(validate.rules).repeated = {min_items: 1}]; - - // An optional limit override to be appended to the descriptor produced by this - // rate limit configuration. If the override value is invalid or cannot be resolved - // from metadata, no override is provided. See :ref:`rate limit override - // ` for more information. - Override limit = 4; -} - -// .. attention:: -// -// Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 *Host* -// header. Thus, if attempting to match on *Host*, match on *:authority* instead. -// -// .. attention:: -// -// To route on HTTP method, use the special HTTP/2 *:method* header. 
This works for both -// HTTP/1 and HTTP/2 as Envoy normalizes headers. E.g., -// -// .. code-block:: json -// -// { -// "name": ":method", -// "exact_match": "POST" -// } -// -// .. attention:: -// In the absence of any header match specifier, match will default to :ref:`present_match -// `. i.e, a request that has the :ref:`name -// ` header will match, regardless of the header's -// value. -// -// [#next-major-version: HeaderMatcher should be refactored to use StringMatcher.] -// [#next-free-field: 14] -message HeaderMatcher { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.HeaderMatcher"; - - reserved 2, 3, 5; - - reserved "regex_match"; - - // Specifies the name of the header in the request. - string name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // Specifies how the header match will be performed to route the request. - oneof header_match_specifier { - // If specified, header match will be performed based on the value of the header. - // This field is deprecated. Please use :ref:`string_match `. - string hidden_envoy_deprecated_exact_match = 4 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // If specified, this regex string is a regular expression rule which implies the entire request - // header value must match the regex. The rule will not match if only a subsequence of the - // request header value matches the regex. - // This field is deprecated. Please use :ref:`string_match `. - type.matcher.v4alpha.RegexMatcher hidden_envoy_deprecated_safe_regex_match = 11 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // If specified, header match will be performed based on range. - // The rule will match if the request header value is within this range. 
- // The entire request header value must represent an integer in base 10 notation: consisting of - // an optional plus or minus sign followed by a sequence of digits. The rule will not match if - // the header value does not represent an integer. Match will fail for empty values, floating - // point numbers or if only a subsequence of the header value is an integer. - // - // Examples: - // - // * For range [-10,0), route will match for header value -1, but not for 0, "somestring", 10.9, - // "-1somestring" - type.v3.Int64Range range_match = 6; - - // If specified as true, header match will be performed based on whether the header is in the - // request. If specified as false, header match will be performed based on whether the header is absent. - bool present_match = 7; - - // If specified, header match will be performed based on the prefix of the header value. - // Note: empty prefix is not allowed, please use present_match instead. - // This field is deprecated. Please use :ref:`string_match `. - // - // Examples: - // - // * The prefix *abcd* matches the value *abcdxyz*, but not for *abcxyz*. - string hidden_envoy_deprecated_prefix_match = 9 [ - deprecated = true, - (validate.rules).string = {min_len: 1}, - (envoy.annotations.deprecated_at_minor_version) = "3.0" - ]; - - // If specified, header match will be performed based on the suffix of the header value. - // Note: empty suffix is not allowed, please use present_match instead. - // This field is deprecated. Please use :ref:`string_match `. - // - // Examples: - // - // * The suffix *abcd* matches the value *xyzabcd*, but not for *xyzbcd*. - string hidden_envoy_deprecated_suffix_match = 10 [ - deprecated = true, - (validate.rules).string = {min_len: 1}, - (envoy.annotations.deprecated_at_minor_version) = "3.0" - ]; - - // If specified, header match will be performed based on whether the header value contains - // the given value or not. 
- // Note: empty contains match is not allowed, please use present_match instead. - // This field is deprecated. Please use :ref:`string_match `. - // - // Examples: - // - // * The value *abcd* matches the value *xyzabcdpqr*, but not for *xyzbcdpqr*. - string hidden_envoy_deprecated_contains_match = 12 [ - deprecated = true, - (validate.rules).string = {min_len: 1}, - (envoy.annotations.deprecated_at_minor_version) = "3.0" - ]; - - // If specified, header match will be performed based on the string match of the header value. - type.matcher.v4alpha.StringMatcher string_match = 13; - } - - // If specified, the match result will be inverted before checking. Defaults to false. - // - // Examples: - // - // * The regex ``\d{3}`` does not match the value *1234*, so it will match when inverted. - // * The range [-10,0) will match the value -1, so it will not match when inverted. - bool invert_match = 8; -} - -// Query parameter matching treats the query string of a request's :path header -// as an ampersand-separated list of keys and/or key=value elements. -// [#next-free-field: 7] -message QueryParameterMatcher { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.QueryParameterMatcher"; - - reserved 3, 4; - - reserved "value", "regex"; - - // Specifies the name of a key that must be present in the requested - // *path*'s query string. - string name = 1 [(validate.rules).string = {min_len: 1 max_bytes: 1024}]; - - oneof query_parameter_match_specifier { - // Specifies whether a query parameter value should match against a string. - type.matcher.v4alpha.StringMatcher string_match = 5 - [(validate.rules).message = {required: true}]; - - // Specifies whether a query parameter should be present. - bool present_match = 6; - } -} - -// HTTP Internal Redirect :ref:`architecture overview `. 
-message InternalRedirectPolicy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.InternalRedirectPolicy"; - - // An internal redirect is not handled, unless the number of previous internal redirects that a - // downstream request has encountered is lower than this value. - // In the case where a downstream request is bounced among multiple routes by internal redirect, - // the first route that hits this threshold, or does not set :ref:`internal_redirect_policy - // ` - // will pass the redirect back to downstream. - // - // If not specified, at most one redirect will be followed. - google.protobuf.UInt32Value max_internal_redirects = 1; - - // Defines what upstream response codes are allowed to trigger internal redirect. If unspecified, - // only 302 will be treated as internal redirect. - // Only 301, 302, 303, 307 and 308 are valid values. Any other codes will be ignored. - repeated uint32 redirect_response_codes = 2 [(validate.rules).repeated = {max_items: 5}]; - - // Specifies a list of predicates that are queried when an upstream response is deemed - // to trigger an internal redirect by all other criteria. Any predicate in the list can reject - // the redirect, causing the response to be proxied to downstream. - // [#extension-category: envoy.internal_redirect_predicates] - repeated core.v4alpha.TypedExtensionConfig predicates = 3; - - // Allow internal redirect to follow a target URI with a different scheme than the value of - // x-forwarded-proto. The default is false. - bool allow_cross_scheme_redirect = 4; -} - -// A simple wrapper for an HTTP filter config. This is intended to be used as a wrapper for the -// map value in -// :ref:`VirtualHost.typed_per_filter_config`, -// :ref:`Route.typed_per_filter_config`, -// or :ref:`WeightedCluster.ClusterWeight.typed_per_filter_config` -// to add additional flags to the filter. 
-// [#not-implemented-hide:] -message FilterConfig { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.FilterConfig"; - - // The filter config. - google.protobuf.Any config = 1; - - // If true, the filter is optional, meaning that if the client does - // not support the specified filter, it may ignore the map entry rather - // than rejecting the config. - bool is_optional = 2; -} diff --git a/generated_api_shadow/envoy/config/route/v4alpha/scoped_route.proto b/generated_api_shadow/envoy/config/route/v4alpha/scoped_route.proto deleted file mode 100644 index 4c640223f701c..0000000000000 --- a/generated_api_shadow/envoy/config/route/v4alpha/scoped_route.proto +++ /dev/null @@ -1,120 +0,0 @@ -syntax = "proto3"; - -package envoy.config.route.v4alpha; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.route.v4alpha"; -option java_outer_classname = "ScopedRouteProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: HTTP scoped routing configuration] -// * Routing :ref:`architecture overview ` - -// Specifies a routing scope, which associates a -// :ref:`Key` to a -// :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` (identified by its resource name). -// -// The HTTP connection manager builds up a table consisting of these Key to -// RouteConfiguration mappings, and looks up the RouteConfiguration to use per -// request according to the algorithm specified in the -// :ref:`scope_key_builder` -// assigned to the HttpConnectionManager. -// -// For example, with the following configurations (in YAML): -// -// HttpConnectionManager config: -// -// .. code:: -// -// ... 
-// scoped_routes: -// name: foo-scoped-routes -// scope_key_builder: -// fragments: -// - header_value_extractor: -// name: X-Route-Selector -// element_separator: , -// element: -// separator: = -// key: vip -// -// ScopedRouteConfiguration resources (specified statically via -// :ref:`scoped_route_configurations_list` -// or obtained dynamically via SRDS): -// -// .. code:: -// -// (1) -// name: route-scope1 -// route_configuration_name: route-config1 -// key: -// fragments: -// - string_key: 172.10.10.20 -// -// (2) -// name: route-scope2 -// route_configuration_name: route-config2 -// key: -// fragments: -// - string_key: 172.20.20.30 -// -// A request from a client such as: -// -// .. code:: -// -// GET / HTTP/1.1 -// Host: foo.com -// X-Route-Selector: vip=172.10.10.20 -// -// would result in the routing table defined by the `route-config1` -// RouteConfiguration being assigned to the HTTP request/stream. -// -message ScopedRouteConfiguration { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.ScopedRouteConfiguration"; - - // Specifies a key which is matched against the output of the - // :ref:`scope_key_builder` - // specified in the HttpConnectionManager. The matching is done per HTTP - // request and is dependent on the order of the fragments contained in the - // Key. - message Key { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.ScopedRouteConfiguration.Key"; - - message Fragment { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.route.v3.ScopedRouteConfiguration.Key.Fragment"; - - oneof type { - option (validate.required) = true; - - // A string to match against. - string string_key = 1; - } - } - - // The ordered set of fragments to match against. The order must match the - // fragments in the corresponding - // :ref:`scope_key_builder`. 
- repeated Fragment fragments = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - // Whether the RouteConfiguration should be loaded on demand. - bool on_demand = 4; - - // The name assigned to the routing scope. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // The resource name to use for a :ref:`envoy_v3_api_msg_service.discovery.v3.DiscoveryRequest` to an - // RDS server to fetch the :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` associated - // with this scope. - string route_configuration_name = 2 [(validate.rules).string = {min_len: 1}]; - - // The key to match against. - Key key = 3 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/config/tap/v4alpha/BUILD b/generated_api_shadow/envoy/config/tap/v4alpha/BUILD deleted file mode 100644 index 95c7990fbc477..0000000000000 --- a/generated_api_shadow/envoy/config/tap/v4alpha/BUILD +++ /dev/null @@ -1,16 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/common/matcher/v4alpha:pkg", - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/route/v4alpha:pkg", - "//envoy/config/tap/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/tap/v4alpha/common.proto b/generated_api_shadow/envoy/config/tap/v4alpha/common.proto deleted file mode 100644 index f436c7947d6e7..0000000000000 --- a/generated_api_shadow/envoy/config/tap/v4alpha/common.proto +++ /dev/null @@ -1,281 +0,0 @@ -syntax = "proto3"; - -package envoy.config.tap.v4alpha; - -import "envoy/config/common/matcher/v4alpha/matcher.proto"; -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/grpc_service.proto"; -import "envoy/config/route/v4alpha/route_components.proto"; - -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.tap.v4alpha"; -option java_outer_classname = "CommonProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Common tap configuration] - -// Tap configuration. -message TapConfig { - // [#comment:TODO(mattklein123): Rate limiting] - - option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.TapConfig"; - - // The match configuration. If the configuration matches the data source being tapped, a tap will - // occur, with the result written to the configured output. - // Exactly one of :ref:`match ` and - // :ref:`match_config ` must be set. If both - // are set, the :ref:`match ` will be used. 
- MatchPredicate hidden_envoy_deprecated_match_config = 1 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // The match configuration. If the configuration matches the data source being tapped, a tap will - // occur, with the result written to the configured output. - // Exactly one of :ref:`match ` and - // :ref:`match_config ` must be set. If both - // are set, the :ref:`match ` will be used. - common.matcher.v4alpha.MatchPredicate match = 4; - - // The tap output configuration. If a match configuration matches a data source being tapped, - // a tap will occur and the data will be written to the configured output. - OutputConfig output_config = 2 [(validate.rules).message = {required: true}]; - - // [#not-implemented-hide:] Specify if Tap matching is enabled. The % of requests\connections for - // which the tap matching is enabled. When not enabled, the request\connection will not be - // recorded. - // - // .. note:: - // - // This field defaults to 100/:ref:`HUNDRED - // `. - core.v4alpha.RuntimeFractionalPercent tap_enabled = 3; -} - -// Tap match configuration. This is a recursive structure which allows complex nested match -// configurations to be built using various logical operators. -// [#next-free-field: 11] -message MatchPredicate { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.MatchPredicate"; - - // A set of match configurations used for logical operations. - message MatchSet { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.tap.v3.MatchPredicate.MatchSet"; - - // The list of rules that make up the set. - repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}]; - } - - oneof rule { - option (validate.required) = true; - - // A set that describes a logical OR. If any member of the set matches, the match configuration - // matches. - MatchSet or_match = 1; - - // A set that describes a logical AND. 
If all members of the set match, the match configuration - // matches. - MatchSet and_match = 2; - - // A negation match. The match configuration will match if the negated match condition matches. - MatchPredicate not_match = 3; - - // The match configuration will always match. - bool any_match = 4 [(validate.rules).bool = {const: true}]; - - // HTTP request headers match configuration. - HttpHeadersMatch http_request_headers_match = 5; - - // HTTP request trailers match configuration. - HttpHeadersMatch http_request_trailers_match = 6; - - // HTTP response headers match configuration. - HttpHeadersMatch http_response_headers_match = 7; - - // HTTP response trailers match configuration. - HttpHeadersMatch http_response_trailers_match = 8; - - // HTTP request generic body match configuration. - HttpGenericBodyMatch http_request_generic_body_match = 9; - - // HTTP response generic body match configuration. - HttpGenericBodyMatch http_response_generic_body_match = 10; - } -} - -// HTTP headers match configuration. -message HttpHeadersMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.tap.v3.HttpHeadersMatch"; - - // HTTP headers to match. - repeated route.v4alpha.HeaderMatcher headers = 1; -} - -// HTTP generic body match configuration. -// List of text strings and hex strings to be located in HTTP body. -// All specified strings must be found in the HTTP body for positive match. -// The search may be limited to specified number of bytes from the body start. -// -// .. attention:: -// -// Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match. -// If multiple patterns are specified, the process is repeated for each pattern. If location of a pattern is known, ``bytes_limit`` should be specified -// to scan only part of the http body. 
-message HttpGenericBodyMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.tap.v3.HttpGenericBodyMatch"; - - message GenericTextMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.tap.v3.HttpGenericBodyMatch.GenericTextMatch"; - - oneof rule { - option (validate.required) = true; - - // Text string to be located in HTTP body. - string string_match = 1 [(validate.rules).string = {min_len: 1}]; - - // Sequence of bytes to be located in HTTP body. - bytes binary_match = 2 [(validate.rules).bytes = {min_len: 1}]; - } - } - - // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer). - uint32 bytes_limit = 1; - - // List of patterns to match. - repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}]; -} - -// Tap output configuration. -message OutputConfig { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.OutputConfig"; - - // Output sinks for tap data. Currently a single sink is allowed in the list. Once multiple - // sink types are supported this constraint will be relaxed. - repeated OutputSink sinks = 1 [(validate.rules).repeated = {min_items: 1 max_items: 1}]; - - // For buffered tapping, the maximum amount of received body that will be buffered prior to - // truncation. If truncation occurs, the :ref:`truncated - // ` field will be set. If not specified, the - // default is 1KiB. - google.protobuf.UInt32Value max_buffered_rx_bytes = 2; - - // For buffered tapping, the maximum amount of transmitted body that will be buffered prior to - // truncation. If truncation occurs, the :ref:`truncated - // ` field will be set. If not specified, the - // default is 1KiB. - google.protobuf.UInt32Value max_buffered_tx_bytes = 3; - - // Indicates whether taps produce a single buffered message per tap, or multiple streamed - // messages per tap in the emitted :ref:`TraceWrapper - // ` messages. 
Note that streamed tapping does not - // mean that no buffering takes place. Buffering may be required if data is processed before a - // match can be determined. See the HTTP tap filter :ref:`streaming - // ` documentation for more information. - bool streaming = 4; -} - -// Tap output sink configuration. -message OutputSink { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.OutputSink"; - - // Output format. All output is in the form of one or more :ref:`TraceWrapper - // ` messages. This enumeration indicates - // how those messages are written. Note that not all sinks support all output formats. See - // individual sink documentation for more information. - enum Format { - // Each message will be written as JSON. Any :ref:`body ` - // data will be present in the :ref:`as_bytes - // ` field. This means that body data will be - // base64 encoded as per the `proto3 JSON mappings - // `_. - JSON_BODY_AS_BYTES = 0; - - // Each message will be written as JSON. Any :ref:`body ` - // data will be present in the :ref:`as_string - // ` field. This means that body data will be - // string encoded as per the `proto3 JSON mappings - // `_. This format type is - // useful when it is known that that body is human readable (e.g., JSON over HTTP) and the - // user wishes to view it directly without being forced to base64 decode the body. - JSON_BODY_AS_STRING = 1; - - // Binary proto format. Note that binary proto is not self-delimiting. If a sink writes - // multiple binary messages without any length information the data stream will not be - // useful. However, for certain sinks that are self-delimiting (e.g., one message per file) - // this output format makes consumption simpler. - PROTO_BINARY = 2; - - // Messages are written as a sequence tuples, where each tuple is the message length encoded - // as a `protobuf 32-bit varint - // `_ - // followed by the binary message. 
The messages can be read back using the language specific - // protobuf coded stream implementation to obtain the message length and the message. - PROTO_BINARY_LENGTH_DELIMITED = 3; - - // Text proto format. - PROTO_TEXT = 4; - } - - // Sink output format. - Format format = 1 [(validate.rules).enum = {defined_only: true}]; - - oneof output_sink_type { - option (validate.required) = true; - - // Tap output will be streamed out the :http:post:`/tap` admin endpoint. - // - // .. attention:: - // - // It is only allowed to specify the streaming admin output sink if the tap is being - // configured from the :http:post:`/tap` admin endpoint. Thus, if an extension has - // been configured to receive tap configuration from some other source (e.g., static - // file, XDS, etc.) configuring the streaming admin output type will fail. - StreamingAdminSink streaming_admin = 2; - - // Tap output will be written to a file per tap sink. - FilePerTapSink file_per_tap = 3; - - // [#not-implemented-hide:] - // GrpcService to stream data to. The format argument must be PROTO_BINARY. - // [#comment: TODO(samflattery): remove cleanup in uber_per_filter.cc once implemented] - StreamingGrpcSink streaming_grpc = 4; - } -} - -// Streaming admin sink configuration. -message StreamingAdminSink { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.tap.v3.StreamingAdminSink"; -} - -// The file per tap sink outputs a discrete file for every tapped stream. -message FilePerTapSink { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.FilePerTapSink"; - - // Path prefix. The output file will be of the form _.pb, where is an - // identifier distinguishing the recorded trace for stream instances (the Envoy - // connection ID, HTTP stream ID, etc.). - string path_prefix = 1 [(validate.rules).string = {min_len: 1}]; -} - -// [#not-implemented-hide:] Streaming gRPC sink configuration sends the taps to an external gRPC -// server. 
-message StreamingGrpcSink { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.tap.v3.StreamingGrpcSink"; - - // Opaque identifier, that will be sent back to the streaming grpc server. - string tap_id = 1; - - // The gRPC server that hosts the Tap Sink Service. - core.v4alpha.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/config/trace/v3/zipkin.proto b/generated_api_shadow/envoy/config/trace/v3/zipkin.proto index 61d3f0805fd33..42e46ed69c649 100644 --- a/generated_api_shadow/envoy/config/trace/v3/zipkin.proto +++ b/generated_api_shadow/envoy/config/trace/v3/zipkin.proto @@ -53,8 +53,7 @@ message ZipkinConfig { string collector_cluster = 1 [(validate.rules).string = {min_len: 1}]; // The API endpoint of the Zipkin service where the spans will be sent. When - // using a standard Zipkin installation, the API endpoint is typically - // /api/v1/spans, which is the default value. + // using a standard Zipkin installation. string collector_endpoint = 2 [(validate.rules).string = {min_len: 1}]; // Determines whether a 128bit trace id will be used when creating a new @@ -65,8 +64,7 @@ message ZipkinConfig { // The default value is true. google.protobuf.BoolValue shared_span_context = 4; - // Determines the selected collector endpoint version. By default, the ``HTTP_JSON_V1`` will be - // used. + // Determines the selected collector endpoint version. CollectorEndpointVersion collector_endpoint_version = 5; // Optional hostname to use when sending spans to the collector_cluster. Useful for collectors diff --git a/generated_api_shadow/envoy/config/trace/v4alpha/BUILD b/generated_api_shadow/envoy/config/trace/v4alpha/BUILD deleted file mode 100644 index 1d56979cc4660..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/trace/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/trace/v4alpha/http_tracer.proto b/generated_api_shadow/envoy/config/trace/v4alpha/http_tracer.proto deleted file mode 100644 index 33c8e73d56b9d..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v4alpha/http_tracer.proto +++ /dev/null @@ -1,59 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v4alpha; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v4alpha"; -option java_outer_classname = "HttpTracerProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Tracing] -// Tracing :ref:`architecture overview `. - -// The tracing configuration specifies settings for an HTTP tracer provider used by Envoy. -// -// Envoy may support other tracers in the future, but right now the HTTP tracer is the only one -// supported. -// -// .. attention:: -// -// Use of this message type has been deprecated in favor of direct use of -// :ref:`Tracing.Http `. -message Tracing { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v3.Tracing"; - - // Configuration for an HTTP tracer provider used by Envoy. - // - // The configuration is defined by the - // :ref:`HttpConnectionManager.Tracing ` - // :ref:`provider ` - // field. - message Http { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.trace.v3.Tracing.Http"; - - reserved 2; - - reserved "config"; - - // The name of the HTTP trace driver to instantiate. 
The name must match a - // supported HTTP trace driver. - // See the :ref:`extensions listed in typed_config below ` for the default list of the HTTP trace driver. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Trace driver specific configuration which must be set according to the driver being instantiated. - // [#extension-category: envoy.tracers] - oneof config_type { - google.protobuf.Any typed_config = 3; - } - } - - // Provides configuration for the HTTP tracer. - Http http = 1; -} diff --git a/generated_api_shadow/envoy/config/trace/v4alpha/service.proto b/generated_api_shadow/envoy/config/trace/v4alpha/service.proto deleted file mode 100644 index d132b32dd79d4..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v4alpha/service.proto +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v4alpha; - -import "envoy/config/core/v4alpha/grpc_service.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v4alpha"; -option java_outer_classname = "ServiceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Trace Service] - -// Configuration structure. -message TraceServiceConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.trace.v3.TraceServiceConfig"; - - // The upstream gRPC cluster that hosts the metrics service. - core.v4alpha.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/data/dns/v4alpha/BUILD b/generated_api_shadow/envoy/data/dns/v4alpha/BUILD deleted file mode 100644 index d26c09b3bed00..0000000000000 --- a/generated_api_shadow/envoy/data/dns/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. 
This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/data/dns/v3:pkg", - "//envoy/type/matcher/v4alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/data/dns/v4alpha/dns_table.proto b/generated_api_shadow/envoy/data/dns/v4alpha/dns_table.proto deleted file mode 100644 index ed596b2cee790..0000000000000 --- a/generated_api_shadow/envoy/data/dns/v4alpha/dns_table.proto +++ /dev/null @@ -1,168 +0,0 @@ -syntax = "proto3"; - -package envoy.data.dns.v4alpha; - -import "envoy/type/matcher/v4alpha/string.proto"; - -import "google/protobuf/duration.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.data.dns.v4alpha"; -option java_outer_classname = "DnsTableProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: DNS Filter Table Data] -// :ref:`DNS Filter config overview `. - -// This message contains the configuration for the DNS Filter if populated -// from the control plane -message DnsTable { - option (udpa.annotations.versioning).previous_message_type = "envoy.data.dns.v3.DnsTable"; - - // This message contains a list of IP addresses returned for a query for a known name - message AddressList { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.dns.v3.DnsTable.AddressList"; - - // This field contains a well formed IP address that is returned in the answer for a - // name query. The address field can be an IPv4 or IPv6 address. Address family - // detection is done automatically when Envoy parses the string. 
Since this field is - // repeated, Envoy will return as many entries from this list in the DNS response while - // keeping the response under 512 bytes - repeated string address = 1 [(validate.rules).repeated = { - min_items: 1 - items {string {min_len: 3}} - }]; - } - - // Specify the service protocol using a numeric or string value - message DnsServiceProtocol { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.dns.v3.DnsTable.DnsServiceProtocol"; - - oneof protocol_config { - option (validate.required) = true; - - // Specify the protocol number for the service. Envoy will try to resolve the number to - // the protocol name. For example, 6 will resolve to "tcp". Refer to: - // https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml - // for protocol names and numbers - uint32 number = 1 [(validate.rules).uint32 = {lt: 255}]; - - // Specify the protocol name for the service. - string name = 2 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; - } - } - - // Specify the target for a given DNS service - // [#next-free-field: 6] - message DnsServiceTarget { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.dns.v3.DnsTable.DnsServiceTarget"; - - // Specify the name of the endpoint for the Service. The name is a hostname or a cluster - oneof endpoint_type { - option (validate.required) = true; - - // Use a resolvable hostname as the endpoint for a service. - string host_name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; - - // Use a cluster name as the endpoint for a service. 
- string cluster_name = 2 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; - } - - // The priority of the service record target - uint32 priority = 3 [(validate.rules).uint32 = {lt: 65536}]; - - // The weight of the service record target - uint32 weight = 4 [(validate.rules).uint32 = {lt: 65536}]; - - // The port to which the service is bound. This value is optional if the target is a - // cluster. Setting port to zero in this case makes the filter use the port value - // from the cluster host - uint32 port = 5 [(validate.rules).uint32 = {lt: 65536}]; - } - - // This message defines a service selection record returned for a service query in a domain - message DnsService { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.dns.v3.DnsTable.DnsService"; - - // The name of the service without the protocol or domain name - string service_name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; - - // The service protocol. This can be specified as a string or the numeric value of the protocol - DnsServiceProtocol protocol = 2; - - // The service entry time to live. 
This is independent from the DNS Answer record TTL - google.protobuf.Duration ttl = 3 [(validate.rules).duration = {gte {seconds: 1}}]; - - // The list of targets hosting the service - repeated DnsServiceTarget targets = 4 [(validate.rules).repeated = {min_items: 1}]; - } - - // Define a list of service records for a given service - message DnsServiceList { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.dns.v3.DnsTable.DnsServiceList"; - - repeated DnsService services = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - message DnsEndpoint { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.dns.v3.DnsTable.DnsEndpoint"; - - oneof endpoint_config { - option (validate.required) = true; - - // Define a list of addresses to return for the specified endpoint - AddressList address_list = 1; - - // Define a cluster whose addresses are returned for the specified endpoint - string cluster_name = 2; - - // Define a DNS Service List for the specified endpoint - DnsServiceList service_list = 3; - } - } - - message DnsVirtualDomain { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.dns.v3.DnsTable.DnsVirtualDomain"; - - // A domain name for which Envoy will respond to query requests - string name = 1 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; - - // The configuration containing the method to determine the address of this endpoint - DnsEndpoint endpoint = 2; - - // Sets the TTL in DNS answers from Envoy returned to the client. The default TTL is 300s - google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gte {seconds: 30}}]; - } - - // Control how many times Envoy makes an attempt to forward a query to an external DNS server - uint32 external_retry_count = 1 [(validate.rules).uint32 = {lte: 3}]; - - // Fully qualified domain names for which Envoy will respond to DNS queries. 
By leaving this - // list empty, Envoy will forward all queries to external resolvers - repeated DnsVirtualDomain virtual_domains = 2; - - // This field is deprecated and no longer used in Envoy. The filter's behavior has changed - // internally to use a different data structure allowing the filter to determine whether a - // query is for known domain without the use of this field. - // - // This field serves to help Envoy determine whether it can authoritatively answer a query - // for a name matching a suffix in this list. If the query name does not match a suffix in - // this list, Envoy will forward the query to an upstream DNS server - repeated type.matcher.v4alpha.StringMatcher hidden_envoy_deprecated_known_suffixes = 3 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; -} diff --git a/generated_api_shadow/envoy/extensions/access_loggers/file/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/access_loggers/file/v4alpha/BUILD deleted file mode 100644 index 7d52fd1c2b1c1..0000000000000 --- a/generated_api_shadow/envoy/extensions/access_loggers/file/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/core/v4alpha:pkg", - "//envoy/extensions/access_loggers/file/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/access_loggers/file/v4alpha/file.proto b/generated_api_shadow/envoy/extensions/access_loggers/file/v4alpha/file.proto deleted file mode 100644 index 62afb2040fdae..0000000000000 --- a/generated_api_shadow/envoy/extensions/access_loggers/file/v4alpha/file.proto +++ /dev/null @@ -1,63 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.access_loggers.file.v4alpha; - -import "envoy/config/core/v4alpha/substitution_format_string.proto"; - -import "google/protobuf/struct.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.access_loggers.file.v4alpha"; -option java_outer_classname = "FileProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: File access log] -// [#extension: envoy.access_loggers.file] - -// Custom configuration for an :ref:`AccessLog ` -// that writes log entries directly to a file. Configures the built-in *envoy.access_loggers.file* -// AccessLog. -// [#next-free-field: 6] -message FileAccessLog { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.access_loggers.file.v3.FileAccessLog"; - - // A path to a local file to which to write the access log entries. - string path = 1 [(validate.rules).string = {min_len: 1}]; - - oneof access_log_format { - // Access log :ref:`format string`. - // Envoy supports :ref:`custom access log formats ` as well as a - // :ref:`default format `. 
- // This field is deprecated. - // Please use :ref:`log_format `. - string hidden_envoy_deprecated_format = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Access log :ref:`format dictionary`. All values - // are rendered as strings. - // This field is deprecated. - // Please use :ref:`log_format `. - google.protobuf.Struct hidden_envoy_deprecated_json_format = 3 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Access log :ref:`format dictionary`. Values are - // rendered as strings, numbers, or boolean values as appropriate. Nested JSON objects may - // be produced by some command operators (e.g.FILTER_STATE or DYNAMIC_METADATA). See the - // documentation for a specific command operator for details. - // This field is deprecated. - // Please use :ref:`log_format `. - google.protobuf.Struct hidden_envoy_deprecated_typed_json_format = 4 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Configuration to form access log data and format. - // If not specified, use :ref:`default format `. - config.core.v4alpha.SubstitutionFormatString log_format = 5 - [(validate.rules).message = {required: true}]; - } -} diff --git a/generated_api_shadow/envoy/extensions/access_loggers/grpc/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/access_loggers/grpc/v4alpha/BUILD deleted file mode 100644 index 83758c9e0b82b..0000000000000 --- a/generated_api_shadow/envoy/extensions/access_loggers/grpc/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/extensions/access_loggers/grpc/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/access_loggers/grpc/v4alpha/als.proto b/generated_api_shadow/envoy/extensions/access_loggers/grpc/v4alpha/als.proto deleted file mode 100644 index 9e6fb1e48386e..0000000000000 --- a/generated_api_shadow/envoy/extensions/access_loggers/grpc/v4alpha/als.proto +++ /dev/null @@ -1,89 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.access_loggers.grpc.v4alpha; - -import "envoy/config/core/v4alpha/config_source.proto"; -import "envoy/config/core/v4alpha/grpc_service.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.access_loggers.grpc.v4alpha"; -option java_outer_classname = "AlsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: gRPC Access Log Service (ALS)] - -// Configuration for the built-in *envoy.access_loggers.http_grpc* -// :ref:`AccessLog `. This configuration will -// populate :ref:`StreamAccessLogsMessage.http_logs -// `. -// [#extension: envoy.access_loggers.http_grpc] -message HttpGrpcAccessLogConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.access_loggers.grpc.v3.HttpGrpcAccessLogConfig"; - - CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}]; - - // Additional request headers to log in :ref:`HTTPRequestProperties.request_headers - // `. 
- repeated string additional_request_headers_to_log = 2; - - // Additional response headers to log in :ref:`HTTPResponseProperties.response_headers - // `. - repeated string additional_response_headers_to_log = 3; - - // Additional response trailers to log in :ref:`HTTPResponseProperties.response_trailers - // `. - repeated string additional_response_trailers_to_log = 4; -} - -// Configuration for the built-in *envoy.access_loggers.tcp_grpc* type. This configuration will -// populate *StreamAccessLogsMessage.tcp_logs*. -// [#extension: envoy.access_loggers.tcp_grpc] -message TcpGrpcAccessLogConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.access_loggers.grpc.v3.TcpGrpcAccessLogConfig"; - - CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}]; -} - -// Common configuration for gRPC access logs. -// [#next-free-field: 7] -message CommonGrpcAccessLogConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.access_loggers.grpc.v3.CommonGrpcAccessLogConfig"; - - // The friendly name of the access log to be returned in :ref:`StreamAccessLogsMessage.Identifier - // `. This allows the - // access log server to differentiate between different access logs coming from the same Envoy. - string log_name = 1 [(validate.rules).string = {min_len: 1}]; - - // The gRPC service for the access log service. - config.core.v4alpha.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; - - // API version for access logs service transport protocol. This describes the access logs service - // gRPC endpoint and version of messages used on the wire. - config.core.v4alpha.ApiVersion transport_api_version = 6 - [(validate.rules).enum = {defined_only: true}]; - - // Interval for flushing access logs to the gRPC stream. Logger will flush requests every time - // this interval is elapsed, or when batch size limit is hit, whichever comes first. 
Defaults to - // 1 second. - google.protobuf.Duration buffer_flush_interval = 3 [(validate.rules).duration = {gt {}}]; - - // Soft size limit in bytes for access log entries buffer. Logger will buffer requests until - // this limit it hit, or every time flush interval is elapsed, whichever comes first. Setting it - // to zero effectively disables the batching. Defaults to 16384. - google.protobuf.UInt32Value buffer_size_bytes = 4; - - // Additional filter state objects to log in :ref:`filter_state_objects - // `. - // Logger will call `FilterState::Object::serializeAsProto` to serialize the filter state object. - repeated string filter_state_objects_to_log = 5; -} diff --git a/generated_api_shadow/envoy/extensions/access_loggers/open_telemetry/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/access_loggers/open_telemetry/v4alpha/BUILD deleted file mode 100644 index 2c81e3b0b05c4..0000000000000 --- a/generated_api_shadow/envoy/extensions/access_loggers/open_telemetry/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/access_loggers/grpc/v4alpha:pkg", - "//envoy/extensions/access_loggers/open_telemetry/v3alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@opentelemetry_proto//:common", - ], -) diff --git a/generated_api_shadow/envoy/extensions/access_loggers/open_telemetry/v4alpha/logs_service.proto b/generated_api_shadow/envoy/extensions/access_loggers/open_telemetry/v4alpha/logs_service.proto deleted file mode 100644 index ceecd924e19d9..0000000000000 --- a/generated_api_shadow/envoy/extensions/access_loggers/open_telemetry/v4alpha/logs_service.proto +++ /dev/null @@ -1,47 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.access_loggers.open_telemetry.v4alpha; - -import "envoy/extensions/access_loggers/grpc/v4alpha/als.proto"; - -import "opentelemetry/proto/common/v1/common.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.access_loggers.open_telemetry.v4alpha"; -option java_outer_classname = "LogsServiceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: OpenTelemetry (gRPC) Access Log] - -// Configuration for the built-in *envoy.access_loggers.open_telemetry* -// :ref:`AccessLog `. This configuration will -// populate `opentelemetry.proto.collector.v1.logs.ExportLogsServiceRequest.resource_logs `_. -// OpenTelemetry `Resource `_ -// attributes are filled with Envoy node info. In addition, the request start time is set in the -// dedicated field. -// [#extension: envoy.access_loggers.open_telemetry] -// [#comment:TODO(itamarkam): allow configuration for resource attributes.] 
-message OpenTelemetryAccessLogConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.access_loggers.open_telemetry.v3alpha.OpenTelemetryAccessLogConfig"; - - // [#comment:TODO(itamarkam): add 'filter_state_objects_to_log' to logs.] - grpc.v4alpha.CommonGrpcAccessLogConfig common_config = 1 - [(validate.rules).message = {required: true}]; - - // OpenTelemetry `LogResource `_ - // fields, following `Envoy access logging formatting `_. - // - // See 'body' in the LogResource proto for more details. - // Example: ``body { string_value: "%PROTOCOL%" }``. - opentelemetry.proto.common.v1.AnyValue body = 2; - - // See 'attributes' in the LogResource proto for more details. - // Example: ``attributes { values { key: "user_agent" value { string_value: "%REQ(USER-AGENT)%" } } }``. - opentelemetry.proto.common.v1.KeyValueList attributes = 3; -} diff --git a/generated_api_shadow/envoy/extensions/access_loggers/stream/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/access_loggers/stream/v4alpha/BUILD deleted file mode 100644 index 33240debccd19..0000000000000 --- a/generated_api_shadow/envoy/extensions/access_loggers/stream/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/extensions/access_loggers/stream/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/access_loggers/stream/v4alpha/stream.proto b/generated_api_shadow/envoy/extensions/access_loggers/stream/v4alpha/stream.proto deleted file mode 100644 index 5be54ad4721dd..0000000000000 --- a/generated_api_shadow/envoy/extensions/access_loggers/stream/v4alpha/stream.proto +++ /dev/null @@ -1,45 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.access_loggers.stream.v4alpha; - -import "envoy/config/core/v4alpha/substitution_format_string.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.access_loggers.stream.v4alpha"; -option java_outer_classname = "StreamProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Standard Streams Access loggers] -// [#extension: envoy.access_loggers.stream] - -// Custom configuration for an :ref:`AccessLog ` -// that writes log entries directly to the operating system's standard output. -message StdoutAccessLog { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.access_loggers.stream.v3.StdoutAccessLog"; - - oneof access_log_format { - // Configuration to form access log data and format. - // If not specified, use :ref:`default format `. - config.core.v4alpha.SubstitutionFormatString log_format = 1 - [(validate.rules).message = {required: true}]; - } -} - -// Custom configuration for an :ref:`AccessLog ` -// that writes log entries directly to the operating system's standard error. 
-message StderrAccessLog { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.access_loggers.stream.v3.StderrAccessLog"; - - oneof access_log_format { - // Configuration to form access log data and format. - // If not specified, use :ref:`default format `. - config.core.v4alpha.SubstitutionFormatString log_format = 1 - [(validate.rules).message = {required: true}]; - } -} diff --git a/generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v3/BUILD b/generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v3/BUILD index d53049c388f7d..05f25a2fe5d91 100644 --- a/generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v3/BUILD +++ b/generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v3/BUILD @@ -6,7 +6,6 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ - "//envoy/config/cluster/dynamic_forward_proxy/v2alpha:pkg", "//envoy/extensions/common/dynamic_forward_proxy/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], diff --git a/generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v4alpha/BUILD deleted file mode 100644 index ca83092e39b11..0000000000000 --- a/generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/clusters/dynamic_forward_proxy/v3:pkg", - "//envoy/extensions/common/dynamic_forward_proxy/v4alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v4alpha/cluster.proto b/generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v4alpha/cluster.proto deleted file mode 100644 index 1b989e0bb725e..0000000000000 --- a/generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v4alpha/cluster.proto +++ /dev/null @@ -1,35 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.clusters.dynamic_forward_proxy.v4alpha; - -import "envoy/extensions/common/dynamic_forward_proxy/v4alpha/dns_cache.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.clusters.dynamic_forward_proxy.v4alpha"; -option java_outer_classname = "ClusterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Dynamic forward proxy cluster configuration] - -// Configuration for the dynamic forward proxy cluster. See the :ref:`architecture overview -// ` for more information. -// [#extension: envoy.clusters.dynamic_forward_proxy] -message ClusterConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.clusters.dynamic_forward_proxy.v3.ClusterConfig"; - - // The DNS cache configuration that the cluster will attach to. Note this configuration must - // match that of associated :ref:`dynamic forward proxy HTTP filter configuration - // `. 
- common.dynamic_forward_proxy.v4alpha.DnsCacheConfig dns_cache_config = 1 - [(validate.rules).message = {required: true}]; - - // If true allow the cluster configuration to disable the auto_sni and auto_san_validation options - // in the :ref:`cluster's upstream_http_protocol_options - // ` - bool allow_insecure_cluster_options = 2; -} diff --git a/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/BUILD b/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/BUILD index fb5436a6bf93a..6e07b4a9226bb 100644 --- a/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/BUILD +++ b/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/BUILD @@ -8,8 +8,8 @@ api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/config/cluster/v3:pkg", - "//envoy/config/common/dynamic_forward_proxy/v2alpha:pkg", "//envoy/config/core/v3:pkg", + "//envoy/extensions/common/key_value/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto b/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto index fa77bb8aad338..4a0d87ff6c3b8 100644 --- a/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto +++ b/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto @@ -6,6 +6,7 @@ import "envoy/config/cluster/v3/cluster.proto"; import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/extension.proto"; import "envoy/config/core/v3/resolver.proto"; +import "envoy/extensions/common/key_value/v3/config.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; @@ -31,7 +32,7 @@ message DnsCacheCircuitBreakers { // Configuration for the dynamic forward proxy DNS cache. See the :ref:`architecture overview // ` for more information. 
-// [#next-free-field: 13] +// [#next-free-field: 14] message DnsCacheConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.common.dynamic_forward_proxy.v2alpha.DnsCacheConfig"; @@ -138,4 +139,8 @@ message DnsCacheConfig { // Setting this timeout will ensure that queries succeed or fail within the specified time frame // and are then retried using the standard refresh rates. Defaults to 5s if not set. google.protobuf.Duration dns_query_timeout = 11 [(validate.rules).duration = {gt {}}]; + + // [#not-implemented-hide:] + // Configuration to flush the DNS cache to long term storage. + key_value.v3.KeyValueStoreConfig key_value_config = 13; } diff --git a/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v4alpha/BUILD deleted file mode 100644 index 20571020ac927..0000000000000 --- a/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v4alpha/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/cluster/v4alpha:pkg", - "//envoy/config/core/v4alpha:pkg", - "//envoy/extensions/common/dynamic_forward_proxy/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v4alpha/dns_cache.proto b/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v4alpha/dns_cache.proto deleted file mode 100644 index aa2831678ed82..0000000000000 --- a/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v4alpha/dns_cache.proto +++ /dev/null @@ -1,144 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.common.dynamic_forward_proxy.v4alpha; - -import "envoy/config/cluster/v4alpha/cluster.proto"; -import "envoy/config/core/v4alpha/address.proto"; -import "envoy/config/core/v4alpha/extension.proto"; -import "envoy/config/core/v4alpha/resolver.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.common.dynamic_forward_proxy.v4alpha"; -option java_outer_classname = "DnsCacheProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Dynamic forward proxy common configuration] - -// Configuration of circuit breakers for resolver. -message DnsCacheCircuitBreakers { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.common.dynamic_forward_proxy.v3.DnsCacheCircuitBreakers"; - - // The maximum number of pending requests that Envoy will allow to the - // resolver. 
If not specified, the default is 1024. - google.protobuf.UInt32Value max_pending_requests = 1; -} - -// Configuration for the dynamic forward proxy DNS cache. See the :ref:`architecture overview -// ` for more information. -// [#next-free-field: 13] -message DnsCacheConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.common.dynamic_forward_proxy.v3.DnsCacheConfig"; - - // The name of the cache. Multiple named caches allow independent dynamic forward proxy - // configurations to operate within a single Envoy process using different configurations. All - // configurations with the same name *must* otherwise have the same settings when referenced - // from different configuration components. Configuration will fail to load if this is not - // the case. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // The DNS lookup family to use during resolution. - // - // [#comment:TODO(mattklein123): Figure out how to support IPv4/IPv6 "happy eyeballs" mode. The - // way this might work is a new lookup family which returns both IPv4 and IPv6 addresses, and - // then configures a host to have a primary and fall back address. With this, we could very - // likely build a "happy eyeballs" connection pool which would race the primary / fall back - // address and return the one that wins. This same method could potentially also be used for - // QUIC to TCP fall back.] - config.cluster.v4alpha.Cluster.DnsLookupFamily dns_lookup_family = 2 - [(validate.rules).enum = {defined_only: true}]; - - // The DNS refresh rate for currently cached DNS hosts. If not specified defaults to 60s. - // - // .. note: - // - // The returned DNS TTL is not currently used to alter the refresh rate. This feature will be - // added in a future change. - // - // .. note: - // - // The refresh rate is rounded to the closest millisecond, and must be at least 1ms. 
- google.protobuf.Duration dns_refresh_rate = 3 - [(validate.rules).duration = {gte {nanos: 1000000}}]; - - // The TTL for hosts that are unused. Hosts that have not been used in the configured time - // interval will be purged. If not specified defaults to 5m. - // - // .. note: - // - // The TTL is only checked at the time of DNS refresh, as specified by *dns_refresh_rate*. This - // means that if the configured TTL is shorter than the refresh rate the host may not be removed - // immediately. - // - // .. note: - // - // The TTL has no relation to DNS TTL and is only used to control Envoy's resource usage. - google.protobuf.Duration host_ttl = 4 [(validate.rules).duration = {gt {}}]; - - // The maximum number of hosts that the cache will hold. If not specified defaults to 1024. - // - // .. note: - // - // The implementation is approximate and enforced independently on each worker thread, thus - // it is possible for the maximum hosts in the cache to go slightly above the configured - // value depending on timing. This is similar to how other circuit breakers work. - google.protobuf.UInt32Value max_hosts = 5 [(validate.rules).uint32 = {gt: 0}]; - - // If the DNS failure refresh rate is specified, - // this is used as the cache's DNS refresh rate when DNS requests are failing. If this setting is - // not specified, the failure refresh rate defaults to the dns_refresh_rate. - config.cluster.v4alpha.Cluster.RefreshRate dns_failure_refresh_rate = 6; - - // The config of circuit breakers for resolver. It provides a configurable threshold. - // Envoy will use dns cache circuit breakers with default settings even if this value is not set. - DnsCacheCircuitBreakers dns_cache_circuit_breaker = 7; - - // Always use TCP queries instead of UDP queries for DNS lookups. - // Setting this value causes failure if the - // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during - // server startup. Apple' API only uses UDP for DNS resolution. 
- // This field is deprecated in favor of *dns_resolution_config* - // which aggregates all of the DNS resolver configuration in a single message. - bool hidden_envoy_deprecated_use_tcp_for_dns_lookups = 8 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // DNS resolution configuration which includes the underlying dns resolver addresses and options. - // *dns_resolution_config* will be deprecated once - // :ref:'typed_dns_resolver_config ' - // is fully supported. - config.core.v4alpha.DnsResolutionConfig dns_resolution_config = 9; - - // DNS resolver type configuration extension. This extension can be used to configure c-ares, apple, - // or any other DNS resolver types and the related parameters. - // For example, an object of :ref:`DnsResolutionConfig ` - // can be packed into this *typed_dns_resolver_config*. This configuration will replace the - // :ref:'dns_resolution_config ' - // configuration eventually. - // TODO(yanjunxiang): Investigate the deprecation plan for *dns_resolution_config*. - // During the transition period when both *dns_resolution_config* and *typed_dns_resolver_config* exists, - // this configuration is optional. - // When *typed_dns_resolver_config* is in place, Envoy will use it and ignore *dns_resolution_config*. - // When *typed_dns_resolver_config* is missing, the default behavior is in place. - // [#not-implemented-hide:] - config.core.v4alpha.TypedExtensionConfig typed_dns_resolver_config = 12; - - // Hostnames that should be preresolved into the cache upon creation. This might provide a - // performance improvement, in the form of cache hits, for hostnames that are going to be - // resolved during steady state and are known at config load time. - repeated config.core.v4alpha.SocketAddress preresolve_hostnames = 10; - - // The timeout used for DNS queries. 
This timeout is independent of any timeout and retry policy - // used by the underlying DNS implementation (e.g., c-areas and Apple DNS) which are opaque. - // Setting this timeout will ensure that queries succeed or fail within the specified time frame - // and are then retried using the standard refresh rates. Defaults to 5s if not set. - google.protobuf.Duration dns_query_timeout = 11 [(validate.rules).duration = {gt {}}]; -} diff --git a/api/envoy/extensions/tracers/dynamic_ot/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/common/key_value/v3/BUILD similarity index 87% rename from api/envoy/extensions/tracers/dynamic_ot/v4alpha/BUILD rename to generated_api_shadow/envoy/extensions/common/key_value/v3/BUILD index d500cc41da1fe..1c1a6f6b44235 100644 --- a/api/envoy/extensions/tracers/dynamic_ot/v4alpha/BUILD +++ b/generated_api_shadow/envoy/extensions/common/key_value/v3/BUILD @@ -6,7 +6,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ - "//envoy/config/trace/v3:pkg", + "//envoy/config/core/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/generated_api_shadow/envoy/extensions/common/key_value/v3/config.proto b/generated_api_shadow/envoy/extensions/common/key_value/v3/config.proto new file mode 100644 index 0000000000000..0db9c622cd16c --- /dev/null +++ b/generated_api_shadow/envoy/extensions/common/key_value/v3/config.proto @@ -0,0 +1,25 @@ +syntax = "proto3"; + +package envoy.extensions.common.key_value.v3; + +import "envoy/config/core/v3/extension.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.common.key_value.v3"; +option java_outer_classname = "ConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Key Value Store storage plugin] + +// [#alpha:] +// 
This shared configuration for Envoy key value stores. +message KeyValueStoreConfig { + // [#extension-category: envoy.common.key_value] + config.core.v3.TypedExtensionConfig config = 1 [(validate.rules).message = {required: true}]; +} diff --git a/generated_api_shadow/envoy/extensions/common/matching/v3/BUILD b/generated_api_shadow/envoy/extensions/common/matching/v3/BUILD index 5fa93360e6558..1afd4545d9608 100644 --- a/generated_api_shadow/envoy/extensions/common/matching/v3/BUILD +++ b/generated_api_shadow/envoy/extensions/common/matching/v3/BUILD @@ -6,8 +6,10 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ + "//envoy/annotations:pkg", "//envoy/config/common/matcher/v3:pkg", "//envoy/config/core/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", + "@com_github_cncf_udpa//xds/type/matcher/v3:pkg", ], ) diff --git a/generated_api_shadow/envoy/extensions/common/matching/v3/extension_matcher.proto b/generated_api_shadow/envoy/extensions/common/matching/v3/extension_matcher.proto index e317d885af393..eee82a381633b 100644 --- a/generated_api_shadow/envoy/extensions/common/matching/v3/extension_matcher.proto +++ b/generated_api_shadow/envoy/extensions/common/matching/v3/extension_matcher.proto @@ -5,6 +5,9 @@ package envoy.extensions.common.matching.v3; import "envoy/config/common/matcher/v3/matcher.proto"; import "envoy/config/core/v3/extension.proto"; +import "xds/type/matcher/v3/matcher.proto"; + +import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; @@ -21,8 +24,12 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // // [#alpha:] message ExtensionWithMatcher { + // The associated matcher. This is deprecated in favor of xds_matcher. + config.common.matcher.v3.Matcher matcher = 1 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + // The associated matcher. 
- config.common.matcher.v3.Matcher matcher = 1 [(validate.rules).message = {required: true}]; + xds.type.matcher.v3.Matcher xds_matcher = 3; // The underlying extension config. config.core.v3.TypedExtensionConfig extension_config = 2 diff --git a/generated_api_shadow/envoy/extensions/common/matching/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/common/matching/v4alpha/BUILD deleted file mode 100644 index 95ccc22a554af..0000000000000 --- a/generated_api_shadow/envoy/extensions/common/matching/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/common/matcher/v4alpha:pkg", - "//envoy/config/core/v4alpha:pkg", - "//envoy/extensions/common/matching/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/common/matching/v4alpha/extension_matcher.proto b/generated_api_shadow/envoy/extensions/common/matching/v4alpha/extension_matcher.proto deleted file mode 100644 index 88ac7c7570f8d..0000000000000 --- a/generated_api_shadow/envoy/extensions/common/matching/v4alpha/extension_matcher.proto +++ /dev/null @@ -1,34 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.common.matching.v4alpha; - -import "envoy/config/common/matcher/v4alpha/matcher.proto"; -import "envoy/config/core/v4alpha/extension.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.common.matching.v4alpha"; -option java_outer_classname = "ExtensionMatcherProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Extension Matcher] - -// Wrapper around an existing extension that 
provides an associated matcher. This allows -// decorating an existing extension with a matcher, which can be used to match against -// relevant protocol data. -// -// [#alpha:] -message ExtensionWithMatcher { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.common.matching.v3.ExtensionWithMatcher"; - - // The associated matcher. - config.common.matcher.v4alpha.Matcher matcher = 1 [(validate.rules).message = {required: true}]; - - // The underlying extension config. - config.core.v4alpha.TypedExtensionConfig extension_config = 2 - [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/extensions/common/tap/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/common/tap/v4alpha/BUILD deleted file mode 100644 index 4f2cbe751624c..0000000000000 --- a/generated_api_shadow/envoy/extensions/common/tap/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/tap/v4alpha:pkg", - "//envoy/extensions/common/tap/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/common/tap/v4alpha/common.proto b/generated_api_shadow/envoy/extensions/common/tap/v4alpha/common.proto deleted file mode 100644 index d04e033f490bc..0000000000000 --- a/generated_api_shadow/envoy/extensions/common/tap/v4alpha/common.proto +++ /dev/null @@ -1,44 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.common.tap.v4alpha; - -import "envoy/config/tap/v4alpha/common.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.common.tap.v4alpha"; -option java_outer_classname = "CommonProto"; -option java_multiple_files = true; 
-option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Common tap extension configuration] - -// Common configuration for all tap extensions. -message CommonExtensionConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.common.tap.v3.CommonExtensionConfig"; - - oneof config_type { - option (validate.required) = true; - - // If specified, the tap filter will be configured via an admin handler. - AdminConfig admin_config = 1; - - // If specified, the tap filter will be configured via a static configuration that cannot be - // changed. - config.tap.v4alpha.TapConfig static_config = 2; - } -} - -// Configuration for the admin handler. See :ref:`here ` for -// more information. -message AdminConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.common.tap.v3.AdminConfig"; - - // Opaque configuration ID. When requests are made to the admin handler, the passed opaque ID is - // matched to the configured filter opaque ID to determine which filter to configure. - string config_id = 1 [(validate.rules).string = {min_len: 1}]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/cache/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/cache/v4alpha/BUILD deleted file mode 100644 index 583ecda68091a..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/cache/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/route/v4alpha:pkg", - "//envoy/extensions/filters/http/cache/v3alpha:pkg", - "//envoy/type/matcher/v4alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/cache/v4alpha/cache.proto b/generated_api_shadow/envoy/extensions/filters/http/cache/v4alpha/cache.proto deleted file mode 100644 index 5297a3d15ef89..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/cache/v4alpha/cache.proto +++ /dev/null @@ -1,82 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.cache.v4alpha; - -import "envoy/config/route/v4alpha/route_components.proto"; -import "envoy/type/matcher/v4alpha/string.proto"; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.cache.v4alpha"; -option java_outer_classname = "CacheProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: HTTP Cache Filter] - -// [#extension: envoy.filters.http.cache] -message CacheConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.cache.v3alpha.CacheConfig"; - - // [#not-implemented-hide:] - // Modifies cache key creation by restricting which parts of the URL are included. - message KeyCreatorParams { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.cache.v3alpha.CacheConfig.KeyCreatorParams"; - - // If true, exclude the URL scheme from the cache key. 
Set to true if your origins always - // produce the same response for http and https requests. - bool exclude_scheme = 1; - - // If true, exclude the host from the cache key. Set to true if your origins' responses don't - // ever depend on host. - bool exclude_host = 2; - - // If *query_parameters_included* is nonempty, only query parameters matched - // by one or more of its matchers are included in the cache key. Any other - // query params will not affect cache lookup. - repeated config.route.v4alpha.QueryParameterMatcher query_parameters_included = 3; - - // If *query_parameters_excluded* is nonempty, query parameters matched by one - // or more of its matchers are excluded from the cache key (even if also - // matched by *query_parameters_included*), and will not affect cache lookup. - repeated config.route.v4alpha.QueryParameterMatcher query_parameters_excluded = 4; - } - - // Config specific to the cache storage implementation. - // [#extension-category: envoy.filters.http.cache] - google.protobuf.Any typed_config = 1 [(validate.rules).any = {required: true}]; - - // List of matching rules that defines allowed *Vary* headers. - // - // The *vary* response header holds a list of header names that affect the - // contents of a response, as described by - // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses. - // - // During insertion, *allowed_vary_headers* acts as a allowlist: if a - // response's *vary* header mentions any header names that aren't matched by any rules in - // *allowed_vary_headers*, that response will not be cached. - // - // During lookup, *allowed_vary_headers* controls what request headers will be - // sent to the cache storage implementation. - repeated type.matcher.v4alpha.StringMatcher allowed_vary_headers = 2; - - // [#not-implemented-hide:] - // - // - // Modifies cache key creation by restricting which parts of the URL are included. 
- KeyCreatorParams key_creator_params = 3; - - // [#not-implemented-hide:] - // - // - // Max body size the cache filter will insert into a cache. 0 means unlimited (though the cache - // storage implementation may have its own limit beyond which it will reject insertions). - uint32 max_body_bytes = 4; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/compressor/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/compressor/v4alpha/BUILD deleted file mode 100644 index b8bf9faed35f8..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/compressor/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/core/v4alpha:pkg", - "//envoy/extensions/filters/http/compressor/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/compressor/v4alpha/compressor.proto b/generated_api_shadow/envoy/extensions/filters/http/compressor/v4alpha/compressor.proto deleted file mode 100644 index b7757531c024d..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/compressor/v4alpha/compressor.proto +++ /dev/null @@ -1,134 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.compressor.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/extension.proto"; - -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.compressor.v4alpha"; -option java_outer_classname = "CompressorProto"; -option java_multiple_files = true; -option 
(udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Compressor] -// Compressor :ref:`configuration overview `. -// [#extension: envoy.filters.http.compressor] - -// [#next-free-field: 9] -message Compressor { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.compressor.v3.Compressor"; - - message CommonDirectionConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.compressor.v3.Compressor.CommonDirectionConfig"; - - // Runtime flag that controls whether compression is enabled or not for the direction this - // common config is put in. If set to false, the filter will operate as a pass-through filter - // in the chosen direction. If the field is omitted, the filter will be enabled. - config.core.v4alpha.RuntimeFeatureFlag enabled = 1; - - // Minimum value of Content-Length header of request or response messages (depending on the direction - // this common config is put in), in bytes, which will trigger compression. The default value is 30. - google.protobuf.UInt32Value min_content_length = 2; - - // Set of strings that allows specifying which mime-types yield compression; e.g., - // application/json, text/html, etc. When this field is not defined, compression will be applied - // to the following mime-types: "application/javascript", "application/json", - // "application/xhtml+xml", "image/svg+xml", "text/css", "text/html", "text/plain", "text/xml" - // and their synonyms. - repeated string content_type = 3; - } - - // Configuration for filter behavior on the request direction. - message RequestDirectionConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.compressor.v3.Compressor.RequestDirectionConfig"; - - CommonDirectionConfig common_config = 1; - } - - // Configuration for filter behavior on the response direction. 
- message ResponseDirectionConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.compressor.v3.Compressor.ResponseDirectionConfig"; - - CommonDirectionConfig common_config = 1; - - // If true, disables compression when the response contains an etag header. When it is false, the - // filter will preserve weak etags and remove the ones that require strong validation. - bool disable_on_etag_header = 2; - - // If true, removes accept-encoding from the request headers before dispatching it to the upstream - // so that responses do not get compressed before reaching the filter. - // - // .. attention:: - // - // To avoid interfering with other compression filters in the same chain use this option in - // the filter closest to the upstream. - bool remove_accept_encoding_header = 3; - } - - // Minimum response length, in bytes, which will trigger compression. The default value is 30. - google.protobuf.UInt32Value hidden_envoy_deprecated_content_length = 1 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Set of strings that allows specifying which mime-types yield compression; e.g., - // application/json, text/html, etc. When this field is not defined, compression will be applied - // to the following mime-types: "application/javascript", "application/json", - // "application/xhtml+xml", "image/svg+xml", "text/css", "text/html", "text/plain", "text/xml" - // and their synonyms. - repeated string hidden_envoy_deprecated_content_type = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // If true, disables compression when the response contains an etag header. When it is false, the - // filter will preserve weak etags and remove the ones that require strong validation. 
- bool hidden_envoy_deprecated_disable_on_etag_header = 3 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // If true, removes accept-encoding from the request headers before dispatching it to the upstream - // so that responses do not get compressed before reaching the filter. - // - // .. attention:: - // - // To avoid interfering with other compression filters in the same chain use this option in - // the filter closest to the upstream. - bool hidden_envoy_deprecated_remove_accept_encoding_header = 4 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Runtime flag that controls whether the filter is enabled or not. If set to false, the - // filter will operate as a pass-through filter. If not specified, defaults to enabled. - config.core.v4alpha.RuntimeFeatureFlag hidden_envoy_deprecated_runtime_enabled = 5 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // A compressor library to use for compression. Currently only - // :ref:`envoy.compression.gzip.compressor` - // is included in Envoy. - // [#extension-category: envoy.compression.compressor] - config.core.v4alpha.TypedExtensionConfig compressor_library = 6 - [(validate.rules).message = {required: true}]; - - // Configuration for request compression. Compression is disabled by default if left empty. - RequestDirectionConfig request_direction_config = 7; - - // Configuration for response compression. Compression is enabled by default if left empty. - // - // .. attention:: - // - // If the field is not empty then the duplicate deprecated fields of the `Compressor` message, - // such as `content_length`, `content_type`, `disable_on_etag_header`, - // `remove_accept_encoding_header` and `runtime_enabled`, are ignored. - // - // Also all the statistics related to response compression will be rooted in - // `.compressor...response.*` - // instead of - // `.compressor...*`. 
- ResponseDirectionConfig response_direction_config = 8; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/csrf/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/csrf/v4alpha/BUILD deleted file mode 100644 index d12fc7262cac4..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/csrf/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/extensions/filters/http/csrf/v3:pkg", - "//envoy/type/matcher/v4alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/csrf/v4alpha/csrf.proto b/generated_api_shadow/envoy/extensions/filters/http/csrf/v4alpha/csrf.proto deleted file mode 100644 index 3de55da6be8cf..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/csrf/v4alpha/csrf.proto +++ /dev/null @@ -1,54 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.csrf.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/type/matcher/v4alpha/string.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.csrf.v4alpha"; -option java_outer_classname = "CsrfProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: CSRF] -// Cross-Site Request Forgery :ref:`configuration overview `. -// [#extension: envoy.filters.http.csrf] - -// CSRF filter config. 
-message CsrfPolicy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.csrf.v3.CsrfPolicy"; - - // Specifies the % of requests for which the CSRF filter is enabled. - // - // If :ref:`runtime_key ` is specified, - // Envoy will lookup the runtime key to get the percentage of requests to filter. - // - // .. note:: - // - // This field defaults to 100/:ref:`HUNDRED - // `. - config.core.v4alpha.RuntimeFractionalPercent filter_enabled = 1 - [(validate.rules).message = {required: true}]; - - // Specifies that CSRF policies will be evaluated and tracked, but not enforced. - // - // This is intended to be used when ``filter_enabled`` is off and will be ignored otherwise. - // - // If :ref:`runtime_key ` is specified, - // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate - // and track the request's *Origin* and *Destination* to determine if it's valid, but will not - // enforce any policies. - config.core.v4alpha.RuntimeFractionalPercent shadow_enabled = 2; - - // Specifies additional source origins that will be allowed in addition to - // the destination origin. - // - // More information on how this can be configured via runtime can be found - // :ref:`here `. - repeated type.matcher.v4alpha.StringMatcher additional_origins = 3; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/dynamic_forward_proxy/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/dynamic_forward_proxy/v4alpha/BUILD deleted file mode 100644 index 8486b45d71d91..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/dynamic_forward_proxy/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/common/dynamic_forward_proxy/v4alpha:pkg", - "//envoy/extensions/filters/http/dynamic_forward_proxy/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/dynamic_forward_proxy/v4alpha/dynamic_forward_proxy.proto b/generated_api_shadow/envoy/extensions/filters/http/dynamic_forward_proxy/v4alpha/dynamic_forward_proxy.proto deleted file mode 100644 index 0dba06106b074..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/dynamic_forward_proxy/v4alpha/dynamic_forward_proxy.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.dynamic_forward_proxy.v4alpha; - -import "envoy/extensions/common/dynamic_forward_proxy/v4alpha/dns_cache.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.dynamic_forward_proxy.v4alpha"; -option java_outer_classname = "DynamicForwardProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Dynamic forward proxy] - -// Configuration for the dynamic forward proxy HTTP filter. See the :ref:`architecture overview -// ` for more information. -// [#extension: envoy.filters.http.dynamic_forward_proxy] -message FilterConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.dynamic_forward_proxy.v3.FilterConfig"; - - // The DNS cache configuration that the filter will attach to. Note this configuration must - // match that of associated :ref:`dynamic forward proxy cluster configuration - // `. 
- common.dynamic_forward_proxy.v4alpha.DnsCacheConfig dns_cache_config = 1 - [(validate.rules).message = {required: true}]; -} - -// Per route Configuration for the dynamic forward proxy HTTP filter. -message PerRouteConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.dynamic_forward_proxy.v3.PerRouteConfig"; - - oneof host_rewrite_specifier { - // Indicates that before DNS lookup, the host header will be swapped with - // this value. If not set or empty, the original host header value - // will be used and no rewrite will happen. - // - // Note: this rewrite affects both DNS lookup and host header forwarding. However, this - // option shouldn't be used with - // :ref:`HCM host rewrite ` given that the - // value set here would be used for DNS lookups whereas the value set in the HCM would be used - // for host header forwarding which is not the desired outcome. - string host_rewrite_literal = 1; - - // Indicates that before DNS lookup, the host header will be swapped with - // the value of this header. If not set or empty, the original host header - // value will be used and no rewrite will happen. - // - // Note: this rewrite affects both DNS lookup and host header forwarding. However, this - // option shouldn't be used with - // :ref:`HCM host rewrite header ` - // given that the value set here would be used for DNS lookups whereas the value set in the HCM - // would be used for host header forwarding which is not the desired outcome. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. 
- string host_rewrite_header = 2; - } -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto index c04d53f1cf8be..62feb51b191d5 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto @@ -10,6 +10,7 @@ import "envoy/type/matcher/v3/metadata.proto"; import "envoy/type/matcher/v3/string.proto"; import "envoy/type/v3/http_status.proto"; +import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -308,7 +309,7 @@ message CheckSettings { // // These settings are only applied to a filter configured with a // :ref:`grpc_service`. - map context_extensions = 1; + map context_extensions = 1 [(udpa.annotations.sensitive) = true]; // When set to true, disable the configured :ref:`with_request_body // ` for a route. diff --git a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/BUILD deleted file mode 100644 index 16a0c5f1b64c4..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/extensions/filters/http/ext_authz/v3:pkg", - "//envoy/type/matcher/v4alpha:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto deleted file mode 100644 index 35b0cbd2f5475..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto +++ /dev/null @@ -1,316 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.ext_authz.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/config_source.proto"; -import "envoy/config/core/v4alpha/grpc_service.proto"; -import "envoy/config/core/v4alpha/http_uri.proto"; -import "envoy/type/matcher/v4alpha/metadata.proto"; -import "envoy/type/matcher/v4alpha/string.proto"; -import "envoy/type/v3/http_status.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.ext_authz.v4alpha"; -option java_outer_classname = "ExtAuthzProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: External Authorization] -// External Authorization :ref:`configuration overview `. -// [#extension: envoy.filters.http.ext_authz] - -// [#next-free-field: 16] -message ExtAuthz { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.ext_authz.v3.ExtAuthz"; - - reserved 4; - - reserved "use_alpha"; - - // External authorization service configuration. 
- oneof services { - // gRPC service configuration (default timeout: 200ms). - config.core.v4alpha.GrpcService grpc_service = 1; - - // HTTP service configuration (default timeout: 200ms). - HttpService http_service = 3; - } - - // API version for ext_authz transport protocol. This describes the ext_authz gRPC endpoint and - // version of messages used on the wire. - config.core.v4alpha.ApiVersion transport_api_version = 12 - [(validate.rules).enum = {defined_only: true}]; - - // Changes filter's behaviour on errors: - // - // 1. When set to true, the filter will *accept* client request even if the communication with - // the authorization service has failed, or if the authorization service has returned a HTTP 5xx - // error. - // - // 2. When set to false, ext-authz will *reject* client requests and return a *Forbidden* - // response if the communication with the authorization service has failed, or if the - // authorization service has returned a HTTP 5xx error. - // - // Note that errors can be *always* tracked in the :ref:`stats - // `. - bool failure_mode_allow = 2; - - // Enables filter to buffer the client request body and send it within the authorization request. - // A ``x-envoy-auth-partial-body: false|true`` metadata header will be added to the authorization - // request message indicating if the body data is partial. - BufferSettings with_request_body = 5; - - // Clears route cache in order to allow the external authorization service to correctly affect - // routing decisions. Filter clears all cached routes when: - // - // 1. The field is set to *true*. - // - // 2. The status returned from the authorization service is a HTTP 200 or gRPC 0. - // - // 3. At least one *authorization response header* is added to the client request, or is used for - // altering another client request header. 
- // - bool clear_route_cache = 6; - - // Sets the HTTP status that is returned to the client when there is a network error between the - // filter and the authorization server. The default status is HTTP 403 Forbidden. - type.v3.HttpStatus status_on_error = 7; - - // Specifies a list of metadata namespaces whose values, if present, will be passed to the - // ext_authz service as an opaque *protobuf::Struct*. - // - // For example, if the *jwt_authn* filter is used and :ref:`payload_in_metadata - // ` is set, - // then the following will pass the jwt payload to the authorization server. - // - // .. code-block:: yaml - // - // metadata_context_namespaces: - // - envoy.filters.http.jwt_authn - // - repeated string metadata_context_namespaces = 8; - - // Specifies if the filter is enabled. - // - // If :ref:`runtime_key ` is specified, - // Envoy will lookup the runtime key to get the percentage of requests to filter. - // - // If this field is not specified, the filter will be enabled for all requests. - config.core.v4alpha.RuntimeFractionalPercent filter_enabled = 9; - - // Specifies if the filter is enabled with metadata matcher. - // If this field is not specified, the filter will be enabled for all requests. - type.matcher.v4alpha.MetadataMatcher filter_enabled_metadata = 14; - - // Specifies whether to deny the requests, when the filter is disabled. - // If :ref:`runtime_key ` is specified, - // Envoy will lookup the runtime key to determine whether to deny request for - // filter protected path at filter disabling. If filter is disabled in - // typed_per_filter_config for the path, requests will not be denied. - // - // If this field is not specified, all requests will be allowed when disabled. - config.core.v4alpha.RuntimeFeatureFlag deny_at_disable = 11; - - // Specifies if the peer certificate is sent to the external service. - // - // When this field is true, Envoy will include the peer X.509 certificate, if available, in the - // :ref:`certificate`. 
- bool include_peer_certificate = 10; - - // Optional additional prefix to use when emitting statistics. This allows to distinguish - // emitted statistics between configured *ext_authz* filters in an HTTP filter chain. For example: - // - // .. code-block:: yaml - // - // http_filters: - // - name: envoy.filters.http.ext_authz - // typed_config: - // "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz - // stat_prefix: waf # This emits ext_authz.waf.ok, ext_authz.waf.denied, etc. - // - name: envoy.filters.http.ext_authz - // typed_config: - // "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz - // stat_prefix: blocker # This emits ext_authz.blocker.ok, ext_authz.blocker.denied, etc. - // - string stat_prefix = 13; - - // Optional labels that will be passed to :ref:`labels` in - // :ref:`destination`. - // The labels will be read from :ref:`metadata` with the specified key. - string bootstrap_metadata_labels_key = 15; -} - -// Configuration for buffering the request data. -message BufferSettings { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.ext_authz.v3.BufferSettings"; - - // Sets the maximum size of a message body that the filter will hold in memory. Envoy will return - // *HTTP 413* and will *not* initiate the authorization process when buffer reaches the number - // set in this field. Note that this setting will have precedence over :ref:`failure_mode_allow - // `. - uint32 max_request_bytes = 1 [(validate.rules).uint32 = {gt: 0}]; - - // When this field is true, Envoy will buffer the message until *max_request_bytes* is reached. - // The authorization request will be dispatched and no 413 HTTP error will be returned by the - // filter. - bool allow_partial_message = 2; - - // If true, the body sent to the external authorization service is set with raw bytes, it sets - // the :ref:`raw_body` - // field of HTTP request attribute context. 
Otherwise, :ref:` - // body` will be filled - // with UTF-8 string request body. - bool pack_as_bytes = 3; -} - -// HttpService is used for raw HTTP communication between the filter and the authorization service. -// When configured, the filter will parse the client request and use these attributes to call the -// authorization server. Depending on the response, the filter may reject or accept the client -// request. Note that in any of these events, metadata can be added, removed or overridden by the -// filter: -// -// *On authorization request*, a list of allowed request headers may be supplied. See -// :ref:`allowed_headers -// ` -// for details. Additional headers metadata may be added to the authorization request. See -// :ref:`headers_to_add -// ` for -// details. -// -// On authorization response status HTTP 200 OK, the filter will allow traffic to the upstream and -// additional headers metadata may be added to the original client request. See -// :ref:`allowed_upstream_headers -// ` -// for details. Additionally, the filter may add additional headers to the client's response. See -// :ref:`allowed_client_headers_on_success -// ` -// for details. -// -// On other authorization response statuses, the filter will not allow traffic. Additional headers -// metadata as well as body may be added to the client's response. See :ref:`allowed_client_headers -// ` -// for details. -// [#next-free-field: 9] -message HttpService { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.ext_authz.v3.HttpService"; - - reserved 3, 4, 5, 6; - - // Sets the HTTP server URI which the authorization requests must be sent to. - config.core.v4alpha.HttpUri server_uri = 1; - - // Sets a prefix to the value of authorization request header *Path*. - string path_prefix = 2; - - // Settings used for controlling authorization request metadata. 
- AuthorizationRequest authorization_request = 7; - - // Settings used for controlling authorization response metadata. - AuthorizationResponse authorization_response = 8; -} - -message AuthorizationRequest { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.ext_authz.v3.AuthorizationRequest"; - - // Authorization request includes the client request headers that have a correspondent match - // in the :ref:`list `. - // - // .. note:: - // - // In addition to the the user's supplied matchers, ``Host``, ``Method``, ``Path``, - // ``Content-Length``, and ``Authorization`` are **automatically included** to the list. - // - // .. note:: - // - // By default, ``Content-Length`` header is set to ``0`` and the request to the authorization - // service has no message body. However, the authorization request *may* include the buffered - // client request body (controlled by :ref:`with_request_body - // ` - // setting) hence the value of its ``Content-Length`` reflects the size of its payload size. - // - type.matcher.v4alpha.ListStringMatcher allowed_headers = 1; - - // Sets a list of headers that will be included to the request to authorization service. Note that - // client request of the same key will be overridden. - repeated config.core.v4alpha.HeaderValue headers_to_add = 2; -} - -message AuthorizationResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.ext_authz.v3.AuthorizationResponse"; - - // When this :ref:`list ` is set, authorization - // response headers that have a correspondent match will be added to the original client request. - // Note that coexistent headers will be overridden. - type.matcher.v4alpha.ListStringMatcher allowed_upstream_headers = 1; - - // When this :ref:`list ` is set, authorization - // response headers that have a correspondent match will be added to the client's response. Note - // that coexistent headers will be appended. 
- type.matcher.v4alpha.ListStringMatcher allowed_upstream_headers_to_append = 3; - - // When this :ref:`list `. is set, authorization - // response headers that have a correspondent match will be added to the client's response. Note - // that when this list is *not* set, all the authorization response headers, except *Authority - // (Host)* will be in the response to the client. When a header is included in this list, *Path*, - // *Status*, *Content-Length*, *WWWAuthenticate* and *Location* are automatically added. - type.matcher.v4alpha.ListStringMatcher allowed_client_headers = 2; - - // When this :ref:`list `. is set, authorization - // response headers that have a correspondent match will be added to the client's response when - // the authorization response itself is successful, i.e. not failed or denied. When this list is - // *not* set, no additional headers will be added to the client's response on success. - type.matcher.v4alpha.ListStringMatcher allowed_client_headers_on_success = 4; -} - -// Extra settings on a per virtualhost/route/weighted-cluster level. -message ExtAuthzPerRoute { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.ext_authz.v3.ExtAuthzPerRoute"; - - oneof override { - option (validate.required) = true; - - // Disable the ext auth filter for this particular vhost or route. - // If disabled is specified in multiple per-filter-configs, the most specific one will be used. - bool disabled = 1 [(validate.rules).bool = {const: true}]; - - // Check request settings for this route. - CheckSettings check_settings = 2 [(validate.rules).message = {required: true}]; - } -} - -// Extra settings for the check request. 
-message CheckSettings { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.ext_authz.v3.CheckSettings"; - - // Context extensions to set on the CheckRequest's - // :ref:`AttributeContext.context_extensions` - // - // You can use this to provide extra context for the external authorization server on specific - // virtual hosts/routes. For example, adding a context extension on the virtual host level can - // give the ext-authz server information on what virtual host is used without needing to parse the - // host header. If CheckSettings is specified in multiple per-filter-configs, they will be merged - // in order, and the result will be used. - // - // Merge semantics for this field are such that keys from more specific configs override. - // - // .. note:: - // - // These settings are only applied to a filter configured with a - // :ref:`grpc_service`. - map context_extensions = 1; - - // When set to true, disable the configured :ref:`with_request_body - // ` for a route. - bool disable_request_body_buffering = 2; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.proto b/generated_api_shadow/envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.proto index 352403ad5b20d..f60865c62315e 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.proto @@ -23,26 +23,15 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // The External Processing filter allows an external service to act on HTTP traffic in a flexible way. // **Current Implementation Status:** -// The filter will send the "request_headers" and "response_headers" messages by default. 
-// In addition, if the "processing mode" is set , the "request_body" and "response_body" -// messages will be sent if the corresponding fields of the "processing_mode" are -// set to BUFFERED or STREAMED, and trailers will be sent if the corresponding fields are set -// to SEND. The BUFFERED_PARTIAL body processing mode is not -// implemented yet. The filter will also respond to "immediate_response" messages -// at any point in the stream. - -// As designed, the filter supports up to six different processing steps, which are in the -// process of being implemented: +// All options and processing modes are implemented except for the following: // -// * Request headers: IMPLEMENTED -// * Request body: BUFFERED_PARTIAL processing mode is not yet implemented -// * Request trailers: IMPLEMENTED -// * Response headers: IMPLEMENTED -// * Response body: BUFFERED_PARTIAL processing mode is not yet implemented -// * Response trailers: IMPLEMENTED - -// The filter communicates with an external gRPC service that can use it to do a variety of things -// with the request and response: +// * Request and response attributes are not sent and not processed. +// * Dynamic metadata in responses from the external processor is ignored. +// * "async mode" is not implemented +// * Per-route configuration is not implemented + +// The filter communicates with an external gRPC service called an "external processor" +// that can do a variety of things with the request and response: // // * Access and modify the HTTP headers on the request, response, or both // * Access and modify the HTTP request and response bodies @@ -62,6 +51,30 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // * To modify request or response trailers if they already exist // * To add request or response trailers where they are not present // +// The filter supports up to six different processing steps. Each is represented by +// a gRPC stream message that is sent to the external processor. 
For each message, the +// processor must send a matching response. +// +// * Request headers: Contains the headers from the original HTTP request. +// * Request body: Sent in a single message if the BUFFERED or BUFFERED_PARTIAL +// mode is chosen, in multiple messages if the STREAMED mode is chosen, and not +// at all otherwise. +// * Request trailers: Delivered if they are present and if the trailer mode is set +// to SEND. +// * Response headers: Contains the headers from the HTTP response. Keep in mind +// that if the upstream system sends them before processing the request body that +// this message may arrive before the complete body. +// * Response body: Sent according to the processing mode like the request body. +// * Response trailers: Delivered according to the processing mode like the +// request trailers. +// +// By default, the processor sends only the request and response headers messages. +// This may be changed to include any of the six steps by changing the processing_mode +// setting of the filter configuration, or by setting the mode_override of any response +// from the external processor. This way, a processor may, for example, use information +// in the request header to determine whether the message body must be examined, or whether +// the proxy should simply stream it straight through. +// // All of this together allows a server to process the filter traffic in fairly // sophisticated ways. For example: // diff --git a/generated_api_shadow/envoy/extensions/filters/http/fault/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/fault/v4alpha/BUILD deleted file mode 100644 index 6b7506bcbf76d..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/fault/v4alpha/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/route/v4alpha:pkg", - "//envoy/extensions/filters/common/fault/v3:pkg", - "//envoy/extensions/filters/http/fault/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/fault/v4alpha/fault.proto b/generated_api_shadow/envoy/extensions/filters/http/fault/v4alpha/fault.proto deleted file mode 100644 index da8b8b48ad3f5..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/fault/v4alpha/fault.proto +++ /dev/null @@ -1,150 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.fault.v4alpha; - -import "envoy/config/route/v4alpha/route_components.proto"; -import "envoy/extensions/filters/common/fault/v3/fault.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.fault.v4alpha"; -option java_outer_classname = "FaultProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Fault Injection] -// Fault Injection :ref:`configuration overview `. -// [#extension: envoy.filters.http.fault] - -// [#next-free-field: 6] -message FaultAbort { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.fault.v3.FaultAbort"; - - // Fault aborts are controlled via an HTTP header (if applicable). See the - // :ref:`HTTP fault filter ` documentation for - // more information. 
- message HeaderAbort { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.fault.v3.FaultAbort.HeaderAbort"; - } - - reserved 1; - - oneof error_type { - option (validate.required) = true; - - // HTTP status code to use to abort the HTTP request. - uint32 http_status = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; - - // gRPC status code to use to abort the gRPC request. - uint32 grpc_status = 5; - - // Fault aborts are controlled via an HTTP header (if applicable). - HeaderAbort header_abort = 4; - } - - // The percentage of requests/operations/connections that will be aborted with the error code - // provided. - type.v3.FractionalPercent percentage = 3; -} - -// [#next-free-field: 16] -message HTTPFault { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.fault.v3.HTTPFault"; - - // If specified, the filter will inject delays based on the values in the - // object. - common.fault.v3.FaultDelay delay = 1; - - // If specified, the filter will abort requests based on the values in - // the object. At least *abort* or *delay* must be specified. - FaultAbort abort = 2; - - // Specifies the name of the (destination) upstream cluster that the - // filter should match on. Fault injection will be restricted to requests - // bound to the specific upstream cluster. - string upstream_cluster = 3; - - // Specifies a set of headers that the filter should match on. The fault - // injection filter can be applied selectively to requests that match a set of - // headers specified in the fault filter config. The chances of actual fault - // injection further depend on the value of the :ref:`percentage - // ` field. - // The filter will check the request's headers against all the specified - // headers in the filter config. 
A match will happen if all the headers in the - // config are present in the request with the same values (or based on - // presence if the *value* field is not in the config). - repeated config.route.v4alpha.HeaderMatcher headers = 4; - - // Faults are injected for the specified list of downstream hosts. If this - // setting is not set, faults are injected for all downstream nodes. - // Downstream node name is taken from :ref:`the HTTP - // x-envoy-downstream-service-node - // ` header and compared - // against downstream_nodes list. - repeated string downstream_nodes = 5; - - // The maximum number of faults that can be active at a single time via the configured fault - // filter. Note that because this setting can be overridden at the route level, it's possible - // for the number of active faults to be greater than this value (if injected via a different - // route). If not specified, defaults to unlimited. This setting can be overridden via - // `runtime ` and any faults that are not injected - // due to overflow will be indicated via the `faults_overflow - // ` stat. - // - // .. attention:: - // Like other :ref:`circuit breakers ` in Envoy, this is a fuzzy - // limit. It's possible for the number of active faults to rise slightly above the configured - // amount due to the implementation details. - google.protobuf.UInt32Value max_active_faults = 6; - - // The response rate limit to be applied to the response body of the stream. When configured, - // the percentage can be overridden by the :ref:`fault.http.rate_limit.response_percent - // ` runtime key. - // - // .. attention:: - // This is a per-stream limit versus a connection level limit. This means that concurrent streams - // will each get an independent limit. - common.fault.v3.FaultRateLimit response_rate_limit = 7; - - // The runtime key to override the :ref:`default ` - // runtime. 
The default is: fault.http.delay.fixed_delay_percent - string delay_percent_runtime = 8; - - // The runtime key to override the :ref:`default ` - // runtime. The default is: fault.http.abort.abort_percent - string abort_percent_runtime = 9; - - // The runtime key to override the :ref:`default ` - // runtime. The default is: fault.http.delay.fixed_duration_ms - string delay_duration_runtime = 10; - - // The runtime key to override the :ref:`default ` - // runtime. The default is: fault.http.abort.http_status - string abort_http_status_runtime = 11; - - // The runtime key to override the :ref:`default ` - // runtime. The default is: fault.http.max_active_faults - string max_active_faults_runtime = 12; - - // The runtime key to override the :ref:`default ` - // runtime. The default is: fault.http.rate_limit.response_percent - string response_rate_limit_percent_runtime = 13; - - // The runtime key to override the :ref:`default ` - // runtime. The default is: fault.http.abort.grpc_status - string abort_grpc_status_runtime = 14; - - // To control whether stats storage is allocated dynamically for each downstream server. - // If set to true, "x-envoy-downstream-service-cluster" field of header will be ignored by this filter. - // If set to false, dynamic stats storage will be allocated for the downstream cluster name. - // Default value is false. 
- bool disable_downstream_cluster_stats = 15; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto b/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto index b2c4ad2ee6815..615fea923a8e1 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto @@ -28,8 +28,25 @@ message FilterConfig { // If true, Envoy will assume that the upstream doesn't understand gRPC frames and // strip the gRPC frame from the request, and add it back in to the response. This will // hide the gRPC semantics from the upstream, allowing it to receive and respond with a - // simple binary encoded protobuf. + // simple binary encoded protobuf. In order to calculate the `Content-Length` header value, Envoy + // will buffer the upstream response unless :ref:`response_size_header + // ` + // is set, in which case Envoy will use the value of an upstream header to calculate the content + // length. bool withhold_grpc_frames = 2; + + // When :ref:`withhold_grpc_frames + // ` + // is true, this option controls how Envoy calculates the `Content-Length`. When + // *response_size_header* is empty, Envoy will buffer the upstream response to calculate its + // size. When *response_size_header* is set to a non-empty string, Envoy will stream the response + // to the downstream and it will use the value of the response header with this name to set the + // `Content-Length` header and gRPC frame size. If the header with this name is repeated, only + // the first value will be used. + // + // Envoy will treat the upstream response as an error if this option is specified and the header + // is missing or if the value does not match the actual response body size. 
+ string response_size_header = 3; } // gRPC reverse bridge filter configuration per virtualhost/route/weighted-cluster level. diff --git a/generated_api_shadow/envoy/extensions/filters/http/gzip/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/gzip/v4alpha/BUILD deleted file mode 100644 index 3b9648df09294..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/gzip/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/filters/http/compressor/v4alpha:pkg", - "//envoy/extensions/filters/http/gzip/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/gzip/v4alpha/gzip.proto b/generated_api_shadow/envoy/extensions/filters/http/gzip/v4alpha/gzip.proto deleted file mode 100644 index 8689148b46253..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/gzip/v4alpha/gzip.proto +++ /dev/null @@ -1,81 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.gzip.v4alpha; - -import "envoy/extensions/filters/http/compressor/v4alpha/compressor.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.gzip.v4alpha"; -option java_outer_classname = "GzipProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Gzip] - -// [#next-free-field: 12] -message Gzip { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.gzip.v3.Gzip"; - - enum CompressionStrategy { - DEFAULT = 0; - FILTERED = 1; - 
HUFFMAN = 2; - RLE = 3; - } - - message CompressionLevel { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.gzip.v3.Gzip.CompressionLevel"; - - enum Enum { - DEFAULT = 0; - BEST = 1; - SPEED = 2; - } - } - - reserved 2, 6, 7, 8; - - reserved "content_length", "content_type", "disable_on_etag_header", - "remove_accept_encoding_header"; - - // Value from 1 to 9 that controls the amount of internal memory used by zlib. Higher values - // use more memory, but are faster and produce better compression results. The default value is 5. - google.protobuf.UInt32Value memory_level = 1 [(validate.rules).uint32 = {lte: 9 gte: 1}]; - - // A value used for selecting the zlib compression level. This setting will affect speed and - // amount of compression applied to the content. "BEST" provides higher compression at the cost of - // higher latency, "SPEED" provides lower compression with minimum impact on response time. - // "DEFAULT" provides an optimal result between speed and compression. This field will be set to - // "DEFAULT" if not specified. - CompressionLevel.Enum compression_level = 3 [(validate.rules).enum = {defined_only: true}]; - - // A value used for selecting the zlib compression strategy which is directly related to the - // characteristics of the content. Most of the time "DEFAULT" will be the best choice, though - // there are situations which changing this parameter might produce better results. For example, - // run-length encoding (RLE) is typically used when the content is known for having sequences - // which same data occurs many consecutive times. For more information about each strategy, please - // refer to zlib manual. - CompressionStrategy compression_strategy = 4 [(validate.rules).enum = {defined_only: true}]; - - // Value from 9 to 15 that represents the base two logarithmic of the compressor's window size. - // Larger window results in better compression at the expense of memory usage. 
The default is 12 - // which will produce a 4096 bytes window. For more details about this parameter, please refer to - // zlib manual > deflateInit2. - google.protobuf.UInt32Value window_bits = 9 [(validate.rules).uint32 = {lte: 15 gte: 9}]; - - // Set of configuration parameters common for all compression filters. You can define - // `content_length`, `content_type` and other parameters in this field. - compressor.v4alpha.Compressor compressor = 10; - - // Value for Zlib's next output buffer. If not set, defaults to 4096. - // See https://www.zlib.net/manual.html for more details. Also see - // https://github.com/envoyproxy/envoy/issues/8448 for context on this filter's performance. - google.protobuf.UInt32Value chunk_size = 11 [(validate.rules).uint32 = {lte: 65536 gte: 4096}]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/BUILD deleted file mode 100644 index 0a8d5eb27fb44..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/filters/http/header_to_metadata/v3:pkg", - "//envoy/type/matcher/v4alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto deleted file mode 100644 index 5b06f1e78556b..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto +++ /dev/null @@ -1,130 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.header_to_metadata.v4alpha; - -import "envoy/type/matcher/v4alpha/regex.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.header_to_metadata.v4alpha"; -option java_outer_classname = "HeaderToMetadataProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Header-To-Metadata Filter] -// -// The configuration for transforming headers into metadata. This is useful -// for matching load balancer subsets, logging, etc. -// -// Header to Metadata :ref:`configuration overview `. -// [#extension: envoy.filters.http.header_to_metadata] - -message Config { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.header_to_metadata.v3.Config"; - - enum ValueType { - STRING = 0; - - NUMBER = 1; - - // The value is a serialized `protobuf.Value - // `_. - PROTOBUF_VALUE = 2; - } - - // ValueEncode defines the encoding algorithm. - enum ValueEncode { - // The value is not encoded. 
- NONE = 0; - - // The value is encoded in `Base64 `_. - // Note: this is mostly used for STRING and PROTOBUF_VALUE to escape the - // non-ASCII characters in the header. - BASE64 = 1; - } - - // [#next-free-field: 7] - message KeyValuePair { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.header_to_metadata.v3.Config.KeyValuePair"; - - // The namespace — if this is empty, the filter's namespace will be used. - string metadata_namespace = 1; - - // The key to use within the namespace. - string key = 2 [(validate.rules).string = {min_len: 1}]; - - oneof value_type { - // The value to pair with the given key. - // - // When used for a - // :ref:`on_header_present ` - // case, if value is non-empty it'll be used instead of the header value. If both are empty, no metadata is added. - // - // When used for a :ref:`on_header_missing ` - // case, a non-empty value must be provided otherwise no metadata is added. - string value = 3; - - // If present, the header's value will be matched and substituted with this. If there is no match or substitution, the header value - // is used as-is. - // - // This is only used for :ref:`on_header_present `. - // - // Note: if the `value` field is non-empty this field should be empty. - type.matcher.v4alpha.RegexMatchAndSubstitute regex_value_rewrite = 6; - } - - // The value's type — defaults to string. - ValueType type = 4 [(validate.rules).enum = {defined_only: true}]; - - // How is the value encoded, default is NONE (not encoded). - // The value will be decoded accordingly before storing to metadata. - ValueEncode encode = 5; - } - - // A Rule defines what metadata to apply when a header is present or missing. 
- // [#next-free-field: 6] - message Rule { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.header_to_metadata.v3.Config.Rule"; - - oneof header_cookie_specifier { - // Specifies that a match will be performed on the value of a header or a cookie. - // - // The header to be extracted. - string header = 1 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // The cookie to be extracted. - string cookie = 5 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; - } - - // If the header or cookie is present, apply this metadata KeyValuePair. - // - // If the value in the KeyValuePair is non-empty, it'll be used instead - // of the header or cookie value. - KeyValuePair on_present = 2; - - // If the header or cookie is not present, apply this metadata KeyValuePair. - // - // The value in the KeyValuePair must be set, since it'll be used in lieu - // of the missing header or cookie value. - KeyValuePair on_missing = 3; - - // Whether or not to remove the header after a rule is applied. - // - // This prevents headers from leaking. - // This field is not supported in case of a cookie. - bool remove = 4; - } - - // The list of rules to apply to requests. - repeated Rule request_rules = 1; - - // The list of rules to apply to responses. - repeated Rule response_rules = 2; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/health_check/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/health_check/v4alpha/BUILD deleted file mode 100644 index 4c4dc0e452110..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/health_check/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/route/v4alpha:pkg", - "//envoy/extensions/filters/http/health_check/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/health_check/v4alpha/health_check.proto b/generated_api_shadow/envoy/extensions/filters/http/health_check/v4alpha/health_check.proto deleted file mode 100644 index 3725d085dd7b0..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/health_check/v4alpha/health_check.proto +++ /dev/null @@ -1,52 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.health_check.v4alpha; - -import "envoy/config/route/v4alpha/route_components.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.health_check.v4alpha"; -option java_outer_classname = "HealthCheckProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Health check] -// Health check :ref:`configuration overview `. -// [#extension: envoy.filters.http.health_check] - -// [#next-free-field: 6] -message HealthCheck { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.health_check.v3.HealthCheck"; - - reserved 2; - - // Specifies whether the filter operates in pass through mode or not. 
- google.protobuf.BoolValue pass_through_mode = 1 [(validate.rules).message = {required: true}]; - - // If operating in pass through mode, the amount of time in milliseconds - // that the filter should cache the upstream response. - google.protobuf.Duration cache_time = 3; - - // If operating in non-pass-through mode, specifies a set of upstream cluster - // names and the minimum percentage of servers in each of those clusters that - // must be healthy or degraded in order for the filter to return a 200. - // - // .. note:: - // - // This value is interpreted as an integer by truncating, so 12.50% will be calculated - // as if it were 12%. - map cluster_min_healthy_percentages = 4; - - // Specifies a set of health check request headers to match on. The health check filter will - // check a request’s headers against all the specified headers. To specify the health check - // endpoint, set the ``:path`` header to match on. - repeated config.route.v4alpha.HeaderMatcher headers = 5; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto b/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto index 9e658ed8627ff..9718dbe0550ab 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto @@ -52,7 +52,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // cache_duration: // seconds: 300 // -// [#next-free-field: 13] +// [#next-free-field: 14] message JwtProvider { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.jwt_authn.v2alpha.JwtProvider"; @@ -181,6 +181,19 @@ message JwtProvider { // repeated string from_params = 7; + // JWT is sent in a cookie. `from_cookies` represents the cookie names to extract from. + // + // For example, if config is: + // + // .. 
code-block:: yaml + // + // from_cookies: + // - auth-token + // + // Then JWT will be extracted from `auth-token` cookie in the request. + // + repeated string from_cookies = 13; + // This field specifies the header name to forward a successfully verified JWT payload to the // backend. The forwarded data is:: // diff --git a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v4alpha/BUILD deleted file mode 100644 index f59226044ce77..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/route/v4alpha:pkg", - "//envoy/extensions/filters/http/jwt_authn/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto b/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto deleted file mode 100644 index 57c6630c940e7..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto +++ /dev/null @@ -1,674 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.jwt_authn.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/http_uri.proto"; -import "envoy/config/route/v4alpha/route_components.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/empty.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.jwt_authn.v4alpha"; -option java_outer_classname = "ConfigProto"; 
-option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: JWT Authentication] -// JWT Authentication :ref:`configuration overview `. -// [#extension: envoy.filters.http.jwt_authn] - -// Please see following for JWT authentication flow: -// -// * `JSON Web Token (JWT) `_ -// * `The OAuth 2.0 Authorization Framework `_ -// * `OpenID Connect `_ -// -// A JwtProvider message specifies how a JSON Web Token (JWT) can be verified. It specifies: -// -// * issuer: the principal that issues the JWT. If specified, it has to match the *iss* field in JWT. -// * allowed audiences: the ones in the token have to be listed here. -// * how to fetch public key JWKS to verify the token signature. -// * how to extract JWT token in the request. -// * how to pass successfully verified token payload. -// -// Example: -// -// .. code-block:: yaml -// -// issuer: https://example.com -// audiences: -// - bookstore_android.apps.googleusercontent.com -// - bookstore_web.apps.googleusercontent.com -// remote_jwks: -// http_uri: -// uri: https://example.com/.well-known/jwks.json -// cluster: example_jwks_cluster -// timeout: 1s -// cache_duration: -// seconds: 300 -// -// [#next-free-field: 13] -message JwtProvider { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.jwt_authn.v3.JwtProvider"; - - // Specify the `principal `_ that issued - // the JWT, usually a URL or an email address. - // - // It is optional. If specified, it has to match the *iss* field in JWT. - // - // If a JWT has *iss* field and this field is specified, they have to match, otherwise the - // JWT *iss* field is not checked. - // - // Note: *JwtRequirement* :ref:`allow_missing ` - // and :ref:`allow_missing_or_failed ` - // are implemented differently than other *JwtRequirements*. 
Hence the usage of this field - // is different as follows if *allow_missing* or *allow_missing_or_failed* is used: - // - // * If a JWT has *iss* field, it needs to be specified by this field in one of *JwtProviders*. - // * If a JWT doesn't have *iss* field, one of *JwtProviders* should fill this field empty. - // * Multiple *JwtProviders* should not have same value in this field. - // - // Example: https://securetoken.google.com - // Example: 1234567-compute@developer.gserviceaccount.com - // - string issuer = 1; - - // The list of JWT `audiences `_ are - // allowed to access. A JWT containing any of these audiences will be accepted. If not specified, - // will not check audiences in the token. - // - // Example: - // - // .. code-block:: yaml - // - // audiences: - // - bookstore_android.apps.googleusercontent.com - // - bookstore_web.apps.googleusercontent.com - // - repeated string audiences = 2; - - // `JSON Web Key Set (JWKS) `_ is needed to - // validate signature of a JWT. This field specifies where to fetch JWKS. - oneof jwks_source_specifier { - option (validate.required) = true; - - // JWKS can be fetched from remote server via HTTP/HTTPS. This field specifies the remote HTTP - // URI and how the fetched JWKS should be cached. - // - // Example: - // - // .. code-block:: yaml - // - // remote_jwks: - // http_uri: - // uri: https://www.googleapis.com/oauth2/v1/certs - // cluster: jwt.www.googleapis.com|443 - // timeout: 1s - // cache_duration: - // seconds: 300 - // - RemoteJwks remote_jwks = 3; - - // JWKS is in local data source. It could be either in a local file or embedded in the - // inline_string. - // - // Example: local file - // - // .. code-block:: yaml - // - // local_jwks: - // filename: /etc/envoy/jwks/jwks1.txt - // - // Example: inline_string - // - // .. 
code-block:: yaml - // - // local_jwks: - // inline_string: ACADADADADA - // - config.core.v4alpha.DataSource local_jwks = 4; - } - - // If false, the JWT is removed in the request after a success verification. If true, the JWT is - // not removed in the request. Default value is false. - bool forward = 5; - - // Two fields below define where to extract the JWT from an HTTP request. - // - // If no explicit location is specified, the following default locations are tried in order: - // - // 1. The Authorization header using the `Bearer schema - // `_. Example:: - // - // Authorization: Bearer . - // - // 2. `access_token `_ query parameter. - // - // Multiple JWTs can be verified for a request. Each JWT has to be extracted from the locations - // its provider specified or from the default locations. - // - // Specify the HTTP headers to extract JWT token. For examples, following config: - // - // .. code-block:: yaml - // - // from_headers: - // - name: x-goog-iap-jwt-assertion - // - // can be used to extract token from header:: - // - // ``x-goog-iap-jwt-assertion: ``. - // - repeated JwtHeader from_headers = 6; - - // JWT is sent in a query parameter. `jwt_params` represents the query parameter names. - // - // For example, if config is: - // - // .. code-block:: yaml - // - // from_params: - // - jwt_token - // - // The JWT format in query parameter is:: - // - // /path?jwt_token= - // - repeated string from_params = 7; - - // This field specifies the header name to forward a successfully verified JWT payload to the - // backend. The forwarded data is:: - // - // base64url_encoded(jwt_payload_in_JSON) - // - // If it is not specified, the payload will not be forwarded. - string forward_payload_header = 8 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // When :ref:`forward_payload_header ` - // is specified, the base64 encoded payload will be added to the headers. - // Normally JWT based64 encode doesn't add padding. 
If this field is true, - // the header will be padded. - // - // This field is only relevant if :ref:`forward_payload_header ` - // is specified. - bool pad_forward_payload_header = 11; - - // If non empty, successfully verified JWT payloads will be written to StreamInfo DynamicMetadata - // in the format as: *namespace* is the jwt_authn filter name as **envoy.filters.http.jwt_authn** - // The value is the *protobuf::Struct*. The value of this field will be the key for its *fields* - // and the value is the *protobuf::Struct* converted from JWT JSON payload. - // - // For example, if payload_in_metadata is *my_payload*: - // - // .. code-block:: yaml - // - // envoy.filters.http.jwt_authn: - // my_payload: - // iss: https://example.com - // sub: test@example.com - // aud: https://example.com - // exp: 1501281058 - // - string payload_in_metadata = 9; - - // Specify the clock skew in seconds when verifying JWT time constraint, - // such as `exp`, and `nbf`. If not specified, default is 60 seconds. - uint32 clock_skew_seconds = 10; - - // Enables JWT cache, its size is specified by *jwt_cache_size*. - // Only valid JWT tokens are cached. - JwtCacheConfig jwt_cache_config = 12; -} - -// This message specifies JWT Cache configuration. -message JwtCacheConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.jwt_authn.v3.JwtCacheConfig"; - - // The unit is number of JWT tokens, default to 100. - uint32 jwt_cache_size = 1; -} - -// This message specifies how to fetch JWKS from remote and how to cache it. -message RemoteJwks { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.jwt_authn.v3.RemoteJwks"; - - // The HTTP URI to fetch the JWKS. For example: - // - // .. 
code-block:: yaml - // - // http_uri: - // uri: https://www.googleapis.com/oauth2/v1/certs - // cluster: jwt.www.googleapis.com|443 - // timeout: 1s - // - config.core.v4alpha.HttpUri http_uri = 1; - - // Duration after which the cached JWKS should be expired. If not specified, default cache - // duration is 5 minutes. - google.protobuf.Duration cache_duration = 2; - - // Fetch Jwks asynchronously in the main thread before the listener is activated. - // Fetched Jwks can be used by all worker threads. - // - // If this feature is not enabled: - // - // * The Jwks is fetched on-demand when the requests come. During the fetching, first - // few requests are paused until the Jwks is fetched. - // * Each worker thread fetches its own Jwks since Jwks cache is per worker thread. - // - // If this feature is enabled: - // - // * Fetched Jwks is done in the main thread before the listener is activated. Its fetched - // Jwks can be used by all worker threads. Each worker thread doesn't need to fetch its own. - // * Jwks is ready when the requests come, not need to wait for the Jwks fetching. - // - JwksAsyncFetch async_fetch = 3; - - // Retry policy for fetching Jwks. optional. turned off by default. - // - // For example: - // - // .. code-block:: yaml - // - // retry_policy: - // retry_back_off: - // base_interval: 0.01s - // max_interval: 20s - // num_retries: 10 - // - // will yield a randomized truncated exponential backoff policy with an initial delay of 10ms - // 10 maximum attempts spaced at most 20s seconds. - // - // .. code-block:: yaml - // - // retry_policy: - // num_retries:1 - // - // uses the default :ref:`retry backoff strategy `. - // with the default base interval is 1000 milliseconds. and the default maximum interval of 10 times the base interval. - // - // if num_retries is omitted, the default is to allow only one retry. - // - // - // If enabled, the retry policy will apply to all Jwks fetching approaches, e.g. 
on demand or asynchronously in background. - // - // - config.core.v4alpha.RetryPolicy retry_policy = 4; -} - -// Fetch Jwks asynchronously in the main thread when the filter config is parsed. -// The listener is activated only after the Jwks is fetched. -// When the Jwks is expired in the cache, it is fetched again in the main thread. -// The fetched Jwks from the main thread can be used by all worker threads. -message JwksAsyncFetch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.jwt_authn.v3.JwksAsyncFetch"; - - // If false, the listener is activated after the initial fetch is completed. - // The initial fetch result can be either successful or failed. - // If true, it is activated without waiting for the initial fetch to complete. - // Default is false. - bool fast_listener = 1; -} - -// This message specifies a header location to extract JWT token. -message JwtHeader { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.jwt_authn.v3.JwtHeader"; - - // The HTTP header name. - string name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // The value prefix. The value format is "value_prefix" - // For example, for "Authorization: Bearer ", value_prefix="Bearer " with a space at the - // end. - string value_prefix = 2 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; -} - -// Specify a required provider with audiences. -message ProviderWithAudiences { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.jwt_authn.v3.ProviderWithAudiences"; - - // Specify a required provider name. - string provider_name = 1; - - // This field overrides the one specified in the JwtProvider. - repeated string audiences = 2; -} - -// This message specifies a Jwt requirement. An empty message means JWT verification is not -// required. 
Here are some config examples: -// -// .. code-block:: yaml -// -// # Example 1: not required with an empty message -// -// # Example 2: require A -// provider_name: provider-A -// -// # Example 3: require A or B -// requires_any: -// requirements: -// - provider_name: provider-A -// - provider_name: provider-B -// -// # Example 4: require A and B -// requires_all: -// requirements: -// - provider_name: provider-A -// - provider_name: provider-B -// -// # Example 5: require A and (B or C) -// requires_all: -// requirements: -// - provider_name: provider-A -// - requires_any: -// requirements: -// - provider_name: provider-B -// - provider_name: provider-C -// -// # Example 6: require A or (B and C) -// requires_any: -// requirements: -// - provider_name: provider-A -// - requires_all: -// requirements: -// - provider_name: provider-B -// - provider_name: provider-C -// -// # Example 7: A is optional (if token from A is provided, it must be valid, but also allows -// missing token.) -// requires_any: -// requirements: -// - provider_name: provider-A -// - allow_missing: {} -// -// # Example 8: A is optional and B is required. -// requires_all: -// requirements: -// - requires_any: -// requirements: -// - provider_name: provider-A -// - allow_missing: {} -// - provider_name: provider-B -// -// [#next-free-field: 7] -message JwtRequirement { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.jwt_authn.v3.JwtRequirement"; - - oneof requires_type { - // Specify a required provider name. - string provider_name = 1; - - // Specify a required provider with audiences. - ProviderWithAudiences provider_and_audiences = 2; - - // Specify list of JwtRequirement. Their results are OR-ed. - // If any one of them passes, the result is passed. - JwtRequirementOrList requires_any = 3; - - // Specify list of JwtRequirement. Their results are AND-ed. - // All of them must pass, if one of them fails or missing, it fails. 
- JwtRequirementAndList requires_all = 4; - - // The requirement is always satisfied even if JWT is missing or the JWT - // verification fails. A typical usage is: this filter is used to only verify - // JWTs and pass the verified JWT payloads to another filter, the other filter - // will make decision. In this mode, all JWT tokens will be verified. - google.protobuf.Empty allow_missing_or_failed = 5; - - // The requirement is satisfied if JWT is missing, but failed if JWT is - // presented but invalid. Similar to allow_missing_or_failed, this is used - // to only verify JWTs and pass the verified payload to another filter. The - // different is this mode will reject requests with invalid tokens. - google.protobuf.Empty allow_missing = 6; - } -} - -// This message specifies a list of RequiredProvider. -// Their results are OR-ed; if any one of them passes, the result is passed -message JwtRequirementOrList { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.jwt_authn.v3.JwtRequirementOrList"; - - // Specify a list of JwtRequirement. - repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}]; -} - -// This message specifies a list of RequiredProvider. -// Their results are AND-ed; all of them must pass, if one of them fails or missing, it fails. -message JwtRequirementAndList { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.jwt_authn.v3.JwtRequirementAndList"; - - // Specify a list of JwtRequirement. - repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}]; -} - -// This message specifies a Jwt requirement for a specific Route condition. -// Example 1: -// -// .. code-block:: yaml -// -// - match: -// prefix: /healthz -// -// In above example, "requires" field is empty for /healthz prefix match, -// it means that requests matching the path prefix don't require JWT authentication. -// -// Example 2: -// -// .. 
code-block:: yaml -// -// - match: -// prefix: / -// requires: { provider_name: provider-A } -// -// In above example, all requests matched the path prefix require jwt authentication -// from "provider-A". -message RequirementRule { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.jwt_authn.v3.RequirementRule"; - - // The route matching parameter. Only when the match is satisfied, the "requires" field will - // apply. - // - // For example: following match will match all requests. - // - // .. code-block:: yaml - // - // match: - // prefix: / - // - config.route.v4alpha.RouteMatch match = 1 [(validate.rules).message = {required: true}]; - - // Specify a Jwt requirement. - // If not specified, Jwt verification is disabled. - oneof requirement_type { - // Specify a Jwt requirement. Please see detail comment in message JwtRequirement. - JwtRequirement requires = 2; - - // Use requirement_name to specify a Jwt requirement. - // This requirement_name MUST be specified at the - // :ref:`requirement_map ` - // in `JwtAuthentication`. - string requirement_name = 3 [(validate.rules).string = {min_len: 1}]; - } -} - -// This message specifies Jwt requirements based on stream_info.filterState. -// This FilterState should use `Router::StringAccessor` object to set a string value. -// Other HTTP filters can use it to specify Jwt requirements dynamically. -// -// Example: -// -// .. code-block:: yaml -// -// name: jwt_selector -// requires: -// issuer_1: -// provider_name: issuer1 -// issuer_2: -// provider_name: issuer2 -// -// If a filter set "jwt_selector" with "issuer_1" to FilterState for a request, -// jwt_authn filter will use JwtRequirement{"provider_name": "issuer1"} to verify. -message FilterStateRule { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.jwt_authn.v3.FilterStateRule"; - - // The filter state name to retrieve the `Router::StringAccessor` object. 
- string name = 1 [(validate.rules).string = {min_len: 1}]; - - // A map of string keys to requirements. The string key is the string value - // in the FilterState with the name specified in the *name* field above. - map requires = 3; -} - -// This is the Envoy HTTP filter config for JWT authentication. -// -// For example: -// -// .. code-block:: yaml -// -// providers: -// provider1: -// issuer: issuer1 -// audiences: -// - audience1 -// - audience2 -// remote_jwks: -// http_uri: -// uri: https://example.com/.well-known/jwks.json -// cluster: example_jwks_cluster -// timeout: 1s -// provider2: -// issuer: issuer2 -// local_jwks: -// inline_string: jwks_string -// -// rules: -// # Not jwt verification is required for /health path -// - match: -// prefix: /health -// -// # Jwt verification for provider1 is required for path prefixed with "prefix" -// - match: -// prefix: /prefix -// requires: -// provider_name: provider1 -// -// # Jwt verification for either provider1 or provider2 is required for all other requests. -// - match: -// prefix: / -// requires: -// requires_any: -// requirements: -// - provider_name: provider1 -// - provider_name: provider2 -// -// [#next-free-field: 6] -message JwtAuthentication { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.jwt_authn.v3.JwtAuthentication"; - - // Map of provider names to JwtProviders. - // - // .. code-block:: yaml - // - // providers: - // provider1: - // issuer: issuer1 - // audiences: - // - audience1 - // - audience2 - // remote_jwks: - // http_uri: - // uri: https://example.com/.well-known/jwks.json - // cluster: example_jwks_cluster - // timeout: 1s - // provider2: - // issuer: provider2 - // local_jwks: - // inline_string: jwks_string - // - map providers = 1; - - // Specifies requirements based on the route matches. The first matched requirement will be - // applied. If there are overlapped match conditions, please put the most specific match first. 
- // - // Examples - // - // .. code-block:: yaml - // - // rules: - // - match: - // prefix: /healthz - // - match: - // prefix: /baz - // requires: - // provider_name: provider1 - // - match: - // prefix: /foo - // requires: - // requires_any: - // requirements: - // - provider_name: provider1 - // - provider_name: provider2 - // - match: - // prefix: /bar - // requires: - // requires_all: - // requirements: - // - provider_name: provider1 - // - provider_name: provider2 - // - repeated RequirementRule rules = 2; - - // This message specifies Jwt requirements based on stream_info.filterState. - // Other HTTP filters can use it to specify Jwt requirements dynamically. - // The *rules* field above is checked first, if it could not find any matches, - // check this one. - FilterStateRule filter_state_rules = 3; - - // When set to true, bypass the `CORS preflight request - // `_ regardless of JWT - // requirements specified in the rules. - bool bypass_cors_preflight = 4; - - // A map of unique requirement_names to JwtRequirements. - // :ref:`requirement_name ` - // in `PerRouteConfig` uses this map to specify a JwtRequirement. - map requirement_map = 5; -} - -// Specify per-route config. -message PerRouteConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.jwt_authn.v3.PerRouteConfig"; - - oneof requirement_specifier { - option (validate.required) = true; - - // Disable Jwt Authentication for this route. - bool disabled = 1 [(validate.rules).bool = {const: true}]; - - // Use requirement_name to specify a JwtRequirement. - // This requirement_name MUST be specified at the - // :ref:`requirement_map ` - // in `JwtAuthentication`. If no, the requests using this route will be rejected with 403. 
- string requirement_name = 2 [(validate.rules).string = {min_len: 1}]; - } -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/oauth2/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/oauth2/v4alpha/BUILD deleted file mode 100644 index f833eacd57722..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/oauth2/v4alpha/BUILD +++ /dev/null @@ -1,16 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/route/v4alpha:pkg", - "//envoy/extensions/filters/http/oauth2/v3alpha:pkg", - "//envoy/extensions/transport_sockets/tls/v4alpha:pkg", - "//envoy/type/matcher/v4alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/oauth2/v4alpha/oauth.proto b/generated_api_shadow/envoy/extensions/filters/http/oauth2/v4alpha/oauth.proto deleted file mode 100644 index 75002c995ccd4..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/oauth2/v4alpha/oauth.proto +++ /dev/null @@ -1,99 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.oauth2.v4alpha; - -import "envoy/config/core/v4alpha/http_uri.proto"; -import "envoy/config/route/v4alpha/route_components.proto"; -import "envoy/extensions/transport_sockets/tls/v4alpha/secret.proto"; -import "envoy/type/matcher/v4alpha/path.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.oauth2.v4alpha"; -option java_outer_classname = "OauthProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = 
NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: OAuth] -// OAuth :ref:`configuration overview `. -// [#extension: envoy.filters.http.oauth2] -// - -message OAuth2Credentials { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.oauth2.v3alpha.OAuth2Credentials"; - - // The client_id to be used in the authorize calls. This value will be URL encoded when sent to the OAuth server. - string client_id = 1 [(validate.rules).string = {min_len: 1}]; - - // The secret used to retrieve the access token. This value will be URL encoded when sent to the OAuth server. - transport_sockets.tls.v4alpha.SdsSecretConfig token_secret = 2 - [(validate.rules).message = {required: true}]; - - // Configures how the secret token should be created. - oneof token_formation { - option (validate.required) = true; - - // If present, the secret token will be a HMAC using the provided secret. - transport_sockets.tls.v4alpha.SdsSecretConfig hmac_secret = 3 - [(validate.rules).message = {required: true}]; - } -} - -// OAuth config -// -// [#next-free-field: 11] -message OAuth2Config { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.oauth2.v3alpha.OAuth2Config"; - - // Endpoint on the authorization server to retrieve the access token from. - config.core.v4alpha.HttpUri token_endpoint = 1; - - // The endpoint redirect to for authorization in response to unauthorized requests. - string authorization_endpoint = 2 [(validate.rules).string = {min_len: 1}]; - - // Credentials used for OAuth. - OAuth2Credentials credentials = 3 [(validate.rules).message = {required: true}]; - - // The redirect URI passed to the authorization endpoint. Supports header formatting - // tokens. For more information, including details on header value syntax, see the - // documentation on :ref:`custom request headers `. - // - // This URI should not contain any query parameters. 
- string redirect_uri = 4 [(validate.rules).string = {min_len: 1}]; - - // Matching criteria used to determine whether a path appears to be the result of a redirect from the authorization server. - type.matcher.v4alpha.PathMatcher redirect_path_matcher = 5 - [(validate.rules).message = {required: true}]; - - // The path to sign a user out, clearing their credential cookies. - type.matcher.v4alpha.PathMatcher signout_path = 6 [(validate.rules).message = {required: true}]; - - // Forward the OAuth token as a Bearer to upstream web service. - bool forward_bearer_token = 7; - - // Any request that matches any of the provided matchers will be passed through without OAuth validation. - repeated config.route.v4alpha.HeaderMatcher pass_through_matcher = 8; - - // Optional list of OAuth scopes to be claimed in the authorization request. If not specified, - // defaults to "user" scope. - // OAuth RFC https://tools.ietf.org/html/rfc6749#section-3.3 - repeated string auth_scopes = 9; - - // Optional resource parameter for authorization request - // RFC: https://tools.ietf.org/html/rfc8707 - repeated string resources = 10; -} - -// Filter config. -message OAuth2 { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.oauth2.v3alpha.OAuth2"; - - // Leave this empty to disable OAuth2 for a specific route, using per filter config. - OAuth2Config config = 1; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v4alpha/BUILD deleted file mode 100644 index 329e11fc50179..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/ratelimit/v4alpha:pkg", - "//envoy/extensions/filters/http/ratelimit/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v4alpha/rate_limit.proto b/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v4alpha/rate_limit.proto deleted file mode 100644 index 688be29e6aab7..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v4alpha/rate_limit.proto +++ /dev/null @@ -1,125 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.ratelimit.v4alpha; - -import "envoy/config/ratelimit/v4alpha/rls.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.ratelimit.v4alpha"; -option java_outer_classname = "RateLimitProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Rate limit] -// Rate limit :ref:`configuration overview `. -// [#extension: envoy.filters.http.ratelimit] - -// [#next-free-field: 10] -message RateLimit { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.ratelimit.v3.RateLimit"; - - // Defines the version of the standard to use for X-RateLimit headers. - enum XRateLimitHeadersRFCVersion { - // X-RateLimit headers disabled. - OFF = 0; - - // Use `draft RFC Version 03 `_. - DRAFT_VERSION_03 = 1; - } - - // The rate limit domain to use when calling the rate limit service. - string domain = 1 [(validate.rules).string = {min_len: 1}]; - - // Specifies the rate limit configurations to be applied with the same - // stage number. 
If not set, the default stage number is 0. - // - // .. note:: - // - // The filter supports a range of 0 - 10 inclusively for stage numbers. - uint32 stage = 2 [(validate.rules).uint32 = {lte: 10}]; - - // The type of requests the filter should apply to. The supported - // types are *internal*, *external* or *both*. A request is considered internal if - // :ref:`x-envoy-internal` is set to true. If - // :ref:`x-envoy-internal` is not set or false, a - // request is considered external. The filter defaults to *both*, and it will apply to all request - // types. - string request_type = 3 - [(validate.rules).string = {in: "internal" in: "external" in: "both" in: ""}]; - - // The timeout in milliseconds for the rate limit service RPC. If not - // set, this defaults to 20ms. - google.protobuf.Duration timeout = 4; - - // The filter's behaviour in case the rate limiting service does - // not respond back. When it is set to true, Envoy will not allow traffic in case of - // communication failure between rate limiting service and the proxy. - bool failure_mode_deny = 5; - - // Specifies whether a `RESOURCE_EXHAUSTED` gRPC code must be returned instead - // of the default `UNAVAILABLE` gRPC code for a rate limited gRPC call. The - // HTTP code will be 200 for a gRPC response. - bool rate_limited_as_resource_exhausted = 6; - - // Configuration for an external rate limit service provider. If not - // specified, any calls to the rate limit service will immediately return - // success. - config.ratelimit.v4alpha.RateLimitServiceConfig rate_limit_service = 7 - [(validate.rules).message = {required: true}]; - - // Defines the standard version to use for X-RateLimit headers emitted by the filter: - // - // * ``X-RateLimit-Limit`` - indicates the request-quota associated to the - // client in the current time-window followed by the description of the - // quota policy. The values are returned by the rate limiting service in - // :ref:`current_limit` - // field. 
Example: `10, 10;w=1;name="per-ip", 1000;w=3600`. - // * ``X-RateLimit-Remaining`` - indicates the remaining requests in the - // current time-window. The values are returned by the rate limiting service - // in :ref:`limit_remaining` - // field. - // * ``X-RateLimit-Reset`` - indicates the number of seconds until reset of - // the current time-window. The values are returned by the rate limiting service - // in :ref:`duration_until_reset` - // field. - // - // In case rate limiting policy specifies more then one time window, the values - // above represent the window that is closest to reaching its limit. - // - // For more information about the headers specification see selected version of - // the `draft RFC `_. - // - // Disabled by default. - XRateLimitHeadersRFCVersion enable_x_ratelimit_headers = 8 - [(validate.rules).enum = {defined_only: true}]; - - // Disables emitting the :ref:`x-envoy-ratelimited` header - // in case of rate limiting (i.e. 429 responses). - // Having this header not present potentially makes the request retriable. - bool disable_x_envoy_ratelimited_header = 9; -} - -message RateLimitPerRoute { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.ratelimit.v3.RateLimitPerRoute"; - - enum VhRateLimitsOptions { - // Use the virtual host rate limits unless the route has a rate limit policy. - OVERRIDE = 0; - - // Use the virtual host rate limits even if the route has a rate limit policy. - INCLUDE = 1; - - // Ignore the virtual host rate limits even if the route does not have a rate limit policy. - IGNORE = 2; - } - - // Specifies if the rate limit filter should include the virtual host rate limits. 
- VhRateLimitsOptions vh_rate_limits = 1 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/rbac/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/rbac/v4alpha/BUILD deleted file mode 100644 index 02db15d5bde27..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/rbac/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/rbac/v4alpha:pkg", - "//envoy/extensions/filters/http/rbac/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/rbac/v4alpha/rbac.proto b/generated_api_shadow/envoy/extensions/filters/http/rbac/v4alpha/rbac.proto deleted file mode 100644 index 41040592caceb..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/rbac/v4alpha/rbac.proto +++ /dev/null @@ -1,49 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.rbac.v4alpha; - -import "envoy/config/rbac/v4alpha/rbac.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.rbac.v4alpha"; -option java_outer_classname = "RbacProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: RBAC] -// Role-Based Access Control :ref:`configuration overview `. -// [#extension: envoy.filters.http.rbac] - -// RBAC filter config. -message RBAC { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.rbac.v3.RBAC"; - - // Specify the RBAC rules to be applied globally. - // If absent, no enforcing RBAC policy will be applied. 
- // If present and empty, DENY. - config.rbac.v4alpha.RBAC rules = 1; - - // Shadow rules are not enforced by the filter (i.e., returning a 403) - // but will emit stats and logs and can be used for rule testing. - // If absent, no shadow RBAC policy will be applied. - config.rbac.v4alpha.RBAC shadow_rules = 2; - - // If specified, shadow rules will emit stats with the given prefix. - // This is useful to distinguish the stat when there are more than 1 RBAC filter configured with - // shadow rules. - string shadow_rules_stat_prefix = 3; -} - -message RBACPerRoute { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.rbac.v3.RBACPerRoute"; - - reserved 1; - - // Override the global configuration of the filter with this new config. - // If absent, the global RBAC policy will be disabled for this route. - RBAC rbac = 2; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/router/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/router/v4alpha/BUILD deleted file mode 100644 index b22ea48735c71..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/router/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/accesslog/v4alpha:pkg", - "//envoy/extensions/filters/http/router/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/router/v4alpha/router.proto b/generated_api_shadow/envoy/extensions/filters/http/router/v4alpha/router.proto deleted file mode 100644 index 2d72bd1470c02..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/router/v4alpha/router.proto +++ /dev/null @@ -1,91 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.router.v4alpha; - -import "envoy/config/accesslog/v4alpha/accesslog.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.router.v4alpha"; -option java_outer_classname = "RouterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Router] -// Router :ref:`configuration overview `. -// [#extension: envoy.filters.http.router] - -// [#next-free-field: 8] -message Router { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.router.v3.Router"; - - // Whether the router generates dynamic cluster statistics. Defaults to - // true. Can be disabled in high performance scenarios. - google.protobuf.BoolValue dynamic_stats = 1; - - // Whether to start a child span for egress routed calls. This can be - // useful in scenarios where other filters (auth, ratelimit, etc.) make - // outbound calls and have child spans rooted at the same ingress - // parent. Defaults to false. 
- bool start_child_span = 2; - - // Configuration for HTTP upstream logs emitted by the router. Upstream logs - // are configured in the same way as access logs, but each log entry represents - // an upstream request. Presuming retries are configured, multiple upstream - // requests may be made for each downstream (inbound) request. - repeated config.accesslog.v4alpha.AccessLog upstream_log = 3; - - // Do not add any additional *x-envoy-* headers to requests or responses. This - // only affects the :ref:`router filter generated *x-envoy-* headers - // `, other Envoy filters and the HTTP - // connection manager may continue to set *x-envoy-* headers. - bool suppress_envoy_headers = 4; - - // Specifies a list of HTTP headers to strictly validate. Envoy will reject a - // request and respond with HTTP status 400 if the request contains an invalid - // value for any of the headers listed in this field. Strict header checking - // is only supported for the following headers: - // - // Value must be a ','-delimited list (i.e. no spaces) of supported retry - // policy values: - // - // * :ref:`config_http_filters_router_x-envoy-retry-grpc-on` - // * :ref:`config_http_filters_router_x-envoy-retry-on` - // - // Value must be an integer: - // - // * :ref:`config_http_filters_router_x-envoy-max-retries` - // * :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` - // * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` - repeated string strict_check_headers = 5 [(validate.rules).repeated = { - items { - string { - in: "x-envoy-upstream-rq-timeout-ms" - in: "x-envoy-upstream-rq-per-try-timeout-ms" - in: "x-envoy-max-retries" - in: "x-envoy-retry-grpc-on" - in: "x-envoy-retry-on" - } - } - }]; - - // If not set, ingress Envoy will ignore - // :ref:`config_http_filters_router_x-envoy-expected-rq-timeout-ms` header, populated by egress - // Envoy, when deriving timeout for upstream cluster. 
- bool respect_expected_rq_timeout = 6; - - // If set, Envoy will avoid incrementing HTTP failure code stats - // on gRPC requests. This includes the individual status code value - // (e.g. upstream_rq_504) and group stats (e.g. upstream_rq_5xx). - // This field is useful if interested in relying only on the gRPC - // stats filter to define success and failure metrics for gRPC requests - // as not all failed gRPC requests charge HTTP status code metrics. See - // :ref:`gRPC stats filter` documentation - // for more details. - bool suppress_grpc_request_failure_code_stats = 7; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/tap/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/tap/v4alpha/BUILD deleted file mode 100644 index 7e5b65cef9b51..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/tap/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/common/tap/v4alpha:pkg", - "//envoy/extensions/filters/http/tap/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/tap/v4alpha/tap.proto b/generated_api_shadow/envoy/extensions/filters/http/tap/v4alpha/tap.proto deleted file mode 100644 index 98798be8bfd2b..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/tap/v4alpha/tap.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.tap.v4alpha; - -import "envoy/extensions/common/tap/v4alpha/common.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.tap.v4alpha"; -option java_outer_classname = "TapProto"; -option 
java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Tap] -// Tap :ref:`configuration overview `. -// [#extension: envoy.filters.http.tap] - -// Top level configuration for the tap filter. -message Tap { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.http.tap.v3.Tap"; - - // Common configuration for the HTTP tap filter. - common.tap.v4alpha.CommonExtensionConfig common_config = 1 - [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/BUILD deleted file mode 100644 index 752598d2f6250..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/route/v4alpha:pkg", - "//envoy/extensions/filters/network/dubbo_proxy/v3:pkg", - "//envoy/type/matcher/v4alpha:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/dubbo_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/dubbo_proxy.proto deleted file mode 100644 index 30499c27f6f0a..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/dubbo_proxy.proto +++ /dev/null @@ -1,70 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.dubbo_proxy.v4alpha; - -import "envoy/extensions/filters/network/dubbo_proxy/v4alpha/route.proto"; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/status.proto"; -import 
"udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.v4alpha"; -option java_outer_classname = "DubboProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Dubbo Proxy] -// Dubbo Proxy :ref:`configuration overview `. -// [#extension: envoy.filters.network.dubbo_proxy] - -// Dubbo Protocol types supported by Envoy. -enum ProtocolType { - // the default protocol. - Dubbo = 0; -} - -// Dubbo Serialization types supported by Envoy. -enum SerializationType { - // the default serialization protocol. - Hessian2 = 0; -} - -// [#next-free-field: 6] -message DubboProxy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.dubbo_proxy.v3.DubboProxy"; - - // The human readable prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // Configure the protocol used. - ProtocolType protocol_type = 2 [(validate.rules).enum = {defined_only: true}]; - - // Configure the serialization protocol used. - SerializationType serialization_type = 3 [(validate.rules).enum = {defined_only: true}]; - - // The route table for the connection manager is static and is specified in this property. - repeated RouteConfiguration route_config = 4; - - // A list of individual Dubbo filters that make up the filter chain for requests made to the - // Dubbo proxy. Order matters as the filters are processed sequentially. For backwards - // compatibility, if no dubbo_filters are specified, a default Dubbo router filter - // (`envoy.filters.dubbo.router`) is used. - repeated DubboFilter dubbo_filters = 5; -} - -// DubboFilter configures a Dubbo filter. 
-message DubboFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.dubbo_proxy.v3.DubboFilter"; - - // The name of the filter to instantiate. The name must match a supported - // filter. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Filter specific configuration which depends on the filter being - // instantiated. See the supported filters for further documentation. - google.protobuf.Any config = 2; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/route.proto b/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/route.proto deleted file mode 100644 index d6314279ed2b6..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v4alpha/route.proto +++ /dev/null @@ -1,129 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.dubbo_proxy.v4alpha; - -import "envoy/config/route/v4alpha/route_components.proto"; -import "envoy/type/matcher/v4alpha/string.proto"; -import "envoy/type/v3/range.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.v4alpha"; -option java_outer_classname = "RouteProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Dubbo Proxy Route Configuration] -// Dubbo Proxy :ref:`configuration overview `. - -// [#next-free-field: 6] -message RouteConfiguration { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.dubbo_proxy.v3.RouteConfiguration"; - - // The name of the route configuration. Reserved for future use in asynchronous route discovery. - string name = 1; - - // The interface name of the service. 
Wildcard interface are supported in the suffix or prefix form. - // e.g. ``*.methods.add`` will match ``com.dev.methods.add``, ``com.prod.methods.add``, etc. - // ``com.dev.methods.*`` will match ``com.dev.methods.add``, ``com.dev.methods.update``, etc. - // Special wildcard ``*`` matching any interface. - // - // .. note:: - // - // The wildcard will not match the empty string. - // e.g. ``*.methods.add`` will match ``com.dev.methods.add`` but not ``.methods.add``. - string interface = 2; - - // Which group does the interface belong to. - string group = 3; - - // The version number of the interface. - string version = 4; - - // The list of routes that will be matched, in order, against incoming requests. The first route - // that matches will be used. - repeated Route routes = 5; -} - -message Route { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.dubbo_proxy.v3.Route"; - - // Route matching parameters. - RouteMatch match = 1 [(validate.rules).message = {required: true}]; - - // Route request to some upstream cluster. - RouteAction route = 2 [(validate.rules).message = {required: true}]; -} - -message RouteMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.dubbo_proxy.v3.RouteMatch"; - - // Method level routing matching. - MethodMatch method = 1; - - // Specifies a set of headers that the route should match on. The router will check the request’s - // headers against all the specified headers in the route config. A match will happen if all the - // headers in the route are present in the request with the same values (or based on presence if - // the value field is not in the config). 
- repeated config.route.v4alpha.HeaderMatcher headers = 2; -} - -message RouteAction { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.dubbo_proxy.v3.RouteAction"; - - oneof cluster_specifier { - option (validate.required) = true; - - // Indicates the upstream cluster to which the request should be routed. - string cluster = 1; - - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. - // Currently ClusterWeight only supports the name and weight fields. - config.route.v4alpha.WeightedCluster weighted_clusters = 2; - } -} - -message MethodMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.dubbo_proxy.v3.MethodMatch"; - - // The parameter matching type. - message ParameterMatchSpecifier { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.dubbo_proxy.v3.MethodMatch.ParameterMatchSpecifier"; - - oneof parameter_match_specifier { - // If specified, header match will be performed based on the value of the header. - string exact_match = 3; - - // If specified, header match will be performed based on range. - // The rule will match if the request header value is within this range. - // The entire request header value must represent an integer in base 10 notation: consisting - // of an optional plus or minus sign followed by a sequence of digits. The rule will not match - // if the header value does not represent an integer. Match will fail for empty values, - // floating point numbers or if only a subsequence of the header value is an integer. - // - // Examples: - // - // * For range [-10,0), route will match for header value -1, but not for 0, - // "somestring", 10.9, "-1somestring" - type.v3.Int64Range range_match = 4; - } - } - - // The name of the method. 
- type.matcher.v4alpha.StringMatcher name = 1; - - // Method parameter definition. - // The key is the parameter index, starting from 0. - // The value is the parameter matching type. - map params_match = 2; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v4alpha/BUILD deleted file mode 100644 index 6d146b1c64d18..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/extensions/filters/network/ext_authz/v3:pkg", - "//envoy/type/matcher/v4alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v4alpha/ext_authz.proto b/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v4alpha/ext_authz.proto deleted file mode 100644 index 21f30481292fa..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v4alpha/ext_authz.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.ext_authz.v4alpha; - -import "envoy/config/core/v4alpha/config_source.proto"; -import "envoy/config/core/v4alpha/grpc_service.proto"; -import "envoy/type/matcher/v4alpha/metadata.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.ext_authz.v4alpha"; -option java_outer_classname = "ExtAuthzProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// 
[#protodoc-title: Network External Authorization ] -// The network layer external authorization service configuration -// :ref:`configuration overview `. -// [#extension: envoy.filters.network.ext_authz] - -// External Authorization filter calls out to an external service over the -// gRPC Authorization API defined by -// :ref:`CheckRequest `. -// A failed check will cause this filter to close the TCP connection. -// [#next-free-field: 8] -message ExtAuthz { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.ext_authz.v3.ExtAuthz"; - - // The prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // The external authorization gRPC service configuration. - // The default timeout is set to 200ms by this filter. - config.core.v4alpha.GrpcService grpc_service = 2; - - // The filter's behaviour in case the external authorization service does - // not respond back. When it is set to true, Envoy will also allow traffic in case of - // communication failure between authorization service and the proxy. - // Defaults to false. - bool failure_mode_allow = 3; - - // Specifies if the peer certificate is sent to the external service. - // - // When this field is true, Envoy will include the peer X.509 certificate, if available, in the - // :ref:`certificate`. - bool include_peer_certificate = 4; - - // API version for ext_authz transport protocol. This describes the ext_authz gRPC endpoint and - // version of Check{Request,Response} used on the wire. - config.core.v4alpha.ApiVersion transport_api_version = 5 - [(validate.rules).enum = {defined_only: true}]; - - // Specifies if the filter is enabled with metadata matcher. - // If this field is not specified, the filter will be enabled for all requests. - type.matcher.v4alpha.MetadataMatcher filter_enabled_metadata = 6; - - // Optional labels that will be passed to :ref:`labels` in - // :ref:`destination`. 
- // The labels will be read from :ref:`metadata` with the specified key. - string bootstrap_metadata_labels_key = 7; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index f09aac839adc5..b5544eaa93b7c 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -506,24 +506,7 @@ message HttpConnectionManager { // determining the origin client's IP address. The default is zero if this option // is not specified. See the documentation for // :ref:`config_http_conn_man_headers_x-forwarded-for` for more information. - // - // .. note:: - // This field is deprecated and instead :ref:`original_ip_detection_extensions - // ` - // should be used to configure the :ref:`xff extension ` - // to configure IP detection using the :ref:`config_http_conn_man_headers_x-forwarded-for` header. To replace - // this field use a config like the following: - // - // .. code-block:: yaml - // - // original_ip_detection_extensions: - // - name: envoy.http.original_ip_detection.xff - // typed_config: - // "@type": type.googleapis.com/envoy.extensions.http.original_ip_detection.xff.v3.XffConfig - // xff_num_trusted_hops: 1 - // - uint32 xff_num_trusted_hops = 19 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + uint32 xff_num_trusted_hops = 19; // The configuration for the original IP detection extensions. // @@ -535,6 +518,12 @@ message HttpConnectionManager { // the request. If the request isn't rejected nor any extension succeeds, the HCM will // fallback to using the remote address. // + // .. 
WARNING:: + // Extensions cannot be used in conjunction with :ref:`use_remote_address + // ` + // nor :ref:`xff_num_trusted_hops + // `. + // // [#extension-category: envoy.http.original_ip_detection] repeated config.core.v3.TypedExtensionConfig original_ip_detection_extensions = 46; diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD deleted file mode 100644 index 37cbc68f19156..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD +++ /dev/null @@ -1,20 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/accesslog/v4alpha:pkg", - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/route/v4alpha:pkg", - "//envoy/config/trace/v4alpha:pkg", - "//envoy/extensions/filters/network/http_connection_manager/v3:pkg", - "//envoy/type/http/v3:pkg", - "//envoy/type/tracing/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto deleted file mode 100644 index d2332a1c9bb91..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ /dev/null @@ -1,1030 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.http_connection_manager.v4alpha; - -import "envoy/config/accesslog/v4alpha/accesslog.proto"; -import "envoy/config/core/v4alpha/base.proto"; -import 
"envoy/config/core/v4alpha/config_source.proto"; -import "envoy/config/core/v4alpha/extension.proto"; -import "envoy/config/core/v4alpha/protocol.proto"; -import "envoy/config/core/v4alpha/substitution_format_string.proto"; -import "envoy/config/route/v4alpha/route.proto"; -import "envoy/config/route/v4alpha/scoped_route.proto"; -import "envoy/config/trace/v4alpha/http_tracer.proto"; -import "envoy/type/http/v3/path_transformation.proto"; -import "envoy/type/tracing/v3/custom_tag.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/security.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v4alpha"; -option java_outer_classname = "HttpConnectionManagerProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: HTTP connection manager] -// HTTP connection manager :ref:`configuration overview `. -// [#extension: envoy.filters.network.http_connection_manager] - -// [#next-free-field: 49] -message HttpConnectionManager { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager"; - - enum CodecType { - // For every new connection, the connection manager will determine which - // codec to use. This mode supports both ALPN for TLS listeners as well as - // protocol inference for plaintext listeners. If ALPN data is available, it - // is preferred, otherwise protocol inference is used. In almost all cases, - // this is the right option to choose for this setting. 
- AUTO = 0; - - // The connection manager will assume that the client is speaking HTTP/1.1. - HTTP1 = 1; - - // The connection manager will assume that the client is speaking HTTP/2 - // (Envoy does not require HTTP/2 to take place over TLS or to use ALPN. - // Prior knowledge is allowed). - HTTP2 = 2; - - // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with - // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient - // to distinguish HTTP1 and HTTP2 traffic. - HTTP3 = 3; - } - - enum ServerHeaderTransformation { - // Overwrite any Server header with the contents of server_name. - OVERWRITE = 0; - - // If no Server header is present, append Server server_name - // If a Server header is present, pass it through. - APPEND_IF_ABSENT = 1; - - // Pass through the value of the server header, and do not append a header - // if none is present. - PASS_THROUGH = 2; - } - - // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP - // header. - enum ForwardClientCertDetails { - // Do not send the XFCC header to the next hop. This is the default value. - SANITIZE = 0; - - // When the client connection is mTLS (Mutual TLS), forward the XFCC header - // in the request. - FORWARD_ONLY = 1; - - // When the client connection is mTLS, append the client certificate - // information to the request’s XFCC header and forward it. - APPEND_FORWARD = 2; - - // When the client connection is mTLS, reset the XFCC header with the client - // certificate information and send it to the next hop. - SANITIZE_SET = 3; - - // Always forward the XFCC header in the request, regardless of whether the - // client connection is mTLS. - ALWAYS_FORWARD_ONLY = 4; - } - - // Determines the action for request that contain %2F, %2f, %5C or %5c sequences in the URI path. - // This operation occurs before URL normalization and the merge slashes transformations if they were enabled. 
- enum PathWithEscapedSlashesAction { - // Default behavior specific to implementation (i.e. Envoy) of this configuration option. - // Envoy, by default, takes the KEEP_UNCHANGED action. - // NOTE: the implementation may change the default behavior at-will. - IMPLEMENTATION_SPECIFIC_DEFAULT = 0; - - // Keep escaped slashes. - KEEP_UNCHANGED = 1; - - // Reject client request with the 400 status. gRPC requests will be rejected with the INTERNAL (13) error code. - // The "httpN.downstream_rq_failed_path_normalization" counter is incremented for each rejected request. - REJECT_REQUEST = 2; - - // Unescape %2F and %5C sequences and redirect request to the new path if these sequences were present. - // Redirect occurs after path normalization and merge slashes transformations if they were configured. - // NOTE: gRPC requests will be rejected with the INTERNAL (13) error code. - // This option minimizes possibility of path confusion exploits by forcing request with unescaped slashes to - // traverse all parties: downstream client, intermediate proxies, Envoy and upstream server. - // The "httpN.downstream_rq_redirected_with_normalized_path" counter is incremented for each - // redirected request. - UNESCAPE_AND_REDIRECT = 3; - - // Unescape %2F and %5C sequences. - // Note: this option should not be enabled if intermediaries perform path based access control as - // it may lead to path confusion vulnerabilities. - UNESCAPE_AND_FORWARD = 4; - } - - // [#next-free-field: 10] - message Tracing { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing"; - - enum OperationName { - // The HTTP listener is used for ingress/incoming requests. - INGRESS = 0; - - // The HTTP listener is used for egress/outgoing requests. 
- EGRESS = 1; - } - - reserved 1, 2; - - reserved "operation_name", "request_headers_for_tags"; - - // Target percentage of requests managed by this HTTP connection manager that will be force - // traced if the :ref:`x-client-trace-id ` - // header is set. This field is a direct analog for the runtime variable - // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager - // `. - // Default: 100% - type.v3.Percent client_sampling = 3; - - // Target percentage of requests managed by this HTTP connection manager that will be randomly - // selected for trace generation, if not requested by the client or not forced. This field is - // a direct analog for the runtime variable 'tracing.random_sampling' in the - // :ref:`HTTP Connection Manager `. - // Default: 100% - type.v3.Percent random_sampling = 4; - - // Target percentage of requests managed by this HTTP connection manager that will be traced - // after all other sampling checks have been applied (client-directed, force tracing, random - // sampling). This field functions as an upper limit on the total configured sampling rate. For - // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1% - // of client requests with the appropriate headers to be force traced. This field is a direct - // analog for the runtime variable 'tracing.global_enabled' in the - // :ref:`HTTP Connection Manager `. - // Default: 100% - type.v3.Percent overall_sampling = 5; - - // Whether to annotate spans with additional data. If true, spans will include logs for stream - // events. - bool verbose = 6; - - // Maximum length of the request path to extract and include in the HttpUrl tag. Used to - // truncate lengthy request paths to meet the needs of a tracing backend. - // Default: 256 - google.protobuf.UInt32Value max_path_tag_length = 7; - - // A list of custom tags with unique tag name to create tags for the active span. 
- repeated type.tracing.v3.CustomTag custom_tags = 8; - - // Configuration for an external tracing provider. - // If not specified, no tracing will be performed. - // - // .. attention:: - // Please be aware that *envoy.tracers.opencensus* provider can only be configured once - // in Envoy lifetime. - // Any attempts to reconfigure it or to use different configurations for different HCM filters - // will be rejected. - // Such a constraint is inherent to OpenCensus itself. It cannot be overcome without changes - // on OpenCensus side. - config.trace.v4alpha.Tracing.Http provider = 9; - } - - message InternalAddressConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager." - "InternalAddressConfig"; - - // Whether unix socket addresses should be considered internal. - bool unix_sockets = 1; - } - - // [#next-free-field: 7] - message SetCurrentClientCertDetails { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager." - "SetCurrentClientCertDetails"; - - reserved 2; - - // Whether to forward the subject of the client cert. Defaults to false. - google.protobuf.BoolValue subject = 1; - - // Whether to forward the entire client cert in URL encoded PEM format. This will appear in the - // XFCC header comma separated from other values with the value Cert="PEM". - // Defaults to false. - bool cert = 3; - - // Whether to forward the entire client cert chain (including the leaf cert) in URL encoded PEM - // format. This will appear in the XFCC header comma separated from other values with the value - // Chain="PEM". - // Defaults to false. - bool chain = 6; - - // Whether to forward the DNS type Subject Alternative Names of the client cert. - // Defaults to false. - bool dns = 4; - - // Whether to forward the URI type Subject Alternative Name of the client cert. 
Defaults to - // false. - bool uri = 5; - } - - // The configuration for HTTP upgrades. - // For each upgrade type desired, an UpgradeConfig must be added. - // - // .. warning:: - // - // The current implementation of upgrade headers does not handle - // multi-valued upgrade headers. Support for multi-valued headers may be - // added in the future if needed. - // - // .. warning:: - // The current implementation of upgrade headers does not work with HTTP/2 - // upstreams. - message UpgradeConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager." - "UpgradeConfig"; - - // The case-insensitive name of this upgrade, e.g. "websocket". - // For each upgrade type present in upgrade_configs, requests with - // Upgrade: [upgrade_type] - // will be proxied upstream. - string upgrade_type = 1; - - // If present, this represents the filter chain which will be created for - // this type of upgrade. If no filters are present, the filter chain for - // HTTP connections will be used for this upgrade type. - repeated HttpFilter filters = 2; - - // Determines if upgrades are enabled or disabled by default. Defaults to true. - // This can be overridden on a per-route basis with :ref:`cluster - // ` as documented in the - // :ref:`upgrade documentation `. - google.protobuf.BoolValue enabled = 3; - } - - // [#not-implemented-hide:] Transformations that apply to path headers. Transformations are applied - // before any processing of requests by HTTP filters, routing, and matching. Only the normalized - // path will be visible internally if a transformation is enabled. Any path rewrites that the - // router performs (e.g. :ref:`regex_rewrite - // ` or :ref:`prefix_rewrite - // `) will apply to the *:path* header - // destined for the upstream. - // - // Note: access logging and tracing will show the original *:path* header. 
- message PathNormalizationOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager." - "PathNormalizationOptions"; - - // [#not-implemented-hide:] Normalization applies internally before any processing of requests by - // HTTP filters, routing, and matching *and* will affect the forwarded *:path* header. Defaults - // to :ref:`NormalizePathRFC3986 - // `. When not - // specified, this value may be overridden by the runtime variable - // :ref:`http_connection_manager.normalize_path`. - // Envoy will respond with 400 to paths that are malformed (e.g. for paths that fail RFC 3986 - // normalization due to disallowed characters.) - type.http.v3.PathTransformation forwarding_transformation = 1; - - // [#not-implemented-hide:] Normalization only applies internally before any processing of - // requests by HTTP filters, routing, and matching. These will be applied after full - // transformation is applied. The *:path* header before this transformation will be restored in - // the router filter and sent upstream unless it was mutated by a filter. Defaults to no - // transformations. - // Multiple actions can be applied in the same Transformation, forming a sequential - // pipeline. The transformations will be performed in the order that they appear. Envoy will - // respond with 400 to paths that are malformed (e.g. for paths that fail RFC 3986 - // normalization due to disallowed characters.) - type.http.v3.PathTransformation http_filter_transformation = 2; - } - - reserved 27, 11; - - reserved "idle_timeout"; - - // Supplies the type of codec that the connection manager should use. - CodecType codec_type = 1 [(validate.rules).enum = {defined_only: true}]; - - // The human readable prefix to use when emitting statistics for the - // connection manager. See the :ref:`statistics documentation ` for - // more information. 
- string stat_prefix = 2 [(validate.rules).string = {min_len: 1}]; - - oneof route_specifier { - option (validate.required) = true; - - // The connection manager’s route table will be dynamically loaded via the RDS API. - Rds rds = 3; - - // The route table for the connection manager is static and is specified in this property. - config.route.v4alpha.RouteConfiguration route_config = 4; - - // A route table will be dynamically assigned to each request based on request attributes - // (e.g., the value of a header). The "routing scopes" (i.e., route tables) and "scope keys" are - // specified in this message. - ScopedRoutes scoped_routes = 31; - } - - // A list of individual HTTP filters that make up the filter chain for - // requests made to the connection manager. :ref:`Order matters ` - // as the filters are processed sequentially as request events happen. - repeated HttpFilter http_filters = 5; - - // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` - // and :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. See the linked - // documentation for more information. Defaults to false. - google.protobuf.BoolValue add_user_agent = 6; - - // Presence of the object defines whether the connection manager - // emits :ref:`tracing ` data to the :ref:`configured tracing provider - // `. - Tracing tracing = 7; - - // Additional settings for HTTP requests handled by the connection manager. These will be - // applicable to both HTTP1 and HTTP2 requests. - config.core.v4alpha.HttpProtocolOptions common_http_protocol_options = 35 - [(udpa.annotations.security).configure_for_untrusted_downstream = true]; - - // Additional HTTP/1 settings that are passed to the HTTP/1 codec. - config.core.v4alpha.Http1ProtocolOptions http_protocol_options = 8; - - // Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. 
- config.core.v4alpha.Http2ProtocolOptions http2_protocol_options = 9 - [(udpa.annotations.security).configure_for_untrusted_downstream = true]; - - // Additional HTTP/3 settings that are passed directly to the HTTP/3 codec. - // [#not-implemented-hide:] - config.core.v4alpha.Http3ProtocolOptions http3_protocol_options = 44; - - // An optional override that the connection manager will write to the server - // header in responses. If not set, the default is *envoy*. - string server_name = 10 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Defines the action to be applied to the Server header on the response path. - // By default, Envoy will overwrite the header with the value specified in - // server_name. - ServerHeaderTransformation server_header_transformation = 34 - [(validate.rules).enum = {defined_only: true}]; - - // Allows for explicit transformation of the :scheme header on the request path. - // If not set, Envoy's default :ref:`scheme ` - // handling applies. - config.core.v4alpha.SchemeHeaderTransformation scheme_header_transformation = 48; - - // The maximum request headers size for incoming connections. - // If unconfigured, the default max request headers allowed is 60 KiB. - // Requests that exceed this limit will receive a 431 response. - google.protobuf.UInt32Value max_request_headers_kb = 29 - [(validate.rules).uint32 = {lte: 8192 gt: 0}]; - - // The stream idle timeout for connections managed by the connection manager. - // If not specified, this defaults to 5 minutes. The default value was selected - // so as not to interfere with any smaller configured timeouts that may have - // existed in configurations prior to the introduction of this feature, while - // introducing robustness to TCP connections that terminate without a FIN. - // - // This idle timeout applies to new streams and is overridable by the - // :ref:`route-level idle_timeout - // `. 
Even on a stream in - // which the override applies, prior to receipt of the initial request - // headers, the :ref:`stream_idle_timeout - // ` - // applies. Each time an encode/decode event for headers or data is processed - // for the stream, the timer will be reset. If the timeout fires, the stream - // is terminated with a 408 Request Timeout error code if no upstream response - // header has been received, otherwise a stream reset occurs. - // - // This timeout also specifies the amount of time that Envoy will wait for the peer to open enough - // window to write any remaining stream data once the entirety of stream data (local end stream is - // true) has been buffered pending available window. In other words, this timeout defends against - // a peer that does not release enough window to completely write the stream, even though all - // data has been proxied within available flow control windows. If the timeout is hit in this - // case, the :ref:`tx_flush_timeout ` counter will be - // incremented. Note that :ref:`max_stream_duration - // ` does not apply to - // this corner case. - // - // If the :ref:`overload action ` "envoy.overload_actions.reduce_timeouts" - // is configured, this timeout is scaled according to the value for - // :ref:`HTTP_DOWNSTREAM_STREAM_IDLE `. - // - // Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due - // to the granularity of events presented to the connection manager. For example, while receiving - // very large request headers, it may be the case that there is traffic regularly arriving on the - // wire while the connection manage is only able to observe the end-of-headers event, hence the - // stream may still idle timeout. - // - // A value of 0 will completely disable the connection manager stream idle - // timeout, although per-route idle timeout overrides will continue to apply. 
- google.protobuf.Duration stream_idle_timeout = 24 - [(udpa.annotations.security).configure_for_untrusted_downstream = true]; - - // The amount of time that Envoy will wait for the entire request to be received. - // The timer is activated when the request is initiated, and is disarmed when the last byte of the - // request is sent upstream (i.e. all decoding filters have processed the request), OR when the - // response is initiated. If not specified or set to 0, this timeout is disabled. - google.protobuf.Duration request_timeout = 28 - [(udpa.annotations.security).configure_for_untrusted_downstream = true]; - - // The amount of time that Envoy will wait for the request headers to be received. The timer is - // activated when the first byte of the headers is received, and is disarmed when the last byte of - // the headers has been received. If not specified or set to 0, this timeout is disabled. - google.protobuf.Duration request_headers_timeout = 41 [ - (validate.rules).duration = {gte {}}, - (udpa.annotations.security).configure_for_untrusted_downstream = true - ]; - - // The time that Envoy will wait between sending an HTTP/2 “shutdown - // notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame. - // This is used so that Envoy provides a grace period for new streams that - // race with the final GOAWAY frame. During this grace period, Envoy will - // continue to accept new streams. After the grace period, a final GOAWAY - // frame is sent and Envoy will start refusing new streams. Draining occurs - // both when a connection hits the idle timeout or during general server - // draining. The default grace period is 5000 milliseconds (5 seconds) if this - // option is not specified. - google.protobuf.Duration drain_timeout = 12; - - // The delayed close timeout is for downstream connections managed by the HTTP connection manager. 
- // It is defined as a grace period after connection close processing has been locally initiated - // during which Envoy will wait for the peer to close (i.e., a TCP FIN/RST is received by Envoy - // from the downstream connection) prior to Envoy closing the socket associated with that - // connection. - // NOTE: This timeout is enforced even when the socket associated with the downstream connection - // is pending a flush of the write buffer. However, any progress made writing data to the socket - // will restart the timer associated with this timeout. This means that the total grace period for - // a socket in this state will be - // +. - // - // Delaying Envoy's connection close and giving the peer the opportunity to initiate the close - // sequence mitigates a race condition that exists when downstream clients do not drain/process - // data in a connection's receive buffer after a remote close has been detected via a socket - // write(). This race leads to such clients failing to process the response code sent by Envoy, - // which could result in erroneous downstream processing. - // - // If the timeout triggers, Envoy will close the connection's socket. - // - // The default timeout is 1000 ms if this option is not specified. - // - // .. NOTE:: - // To be useful in avoiding the race condition described above, this timeout must be set - // to *at least* +<100ms to account for - // a reasonable "worst" case processing time for a full iteration of Envoy's event loop>. - // - // .. WARNING:: - // A value of 0 will completely disable delayed close processing. When disabled, the downstream - // connection's socket will be closed immediately after the write flush is completed or will - // never close if the write flush does not complete. - google.protobuf.Duration delayed_close_timeout = 26; - - // Configuration for :ref:`HTTP access logs ` - // emitted by the connection manager. 
- repeated config.accesslog.v4alpha.AccessLog access_log = 13; - - // If set to true, the connection manager will use the real remote address - // of the client connection when determining internal versus external origin and manipulating - // various headers. If set to false or absent, the connection manager will use the - // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. See the documentation for - // :ref:`config_http_conn_man_headers_x-forwarded-for`, - // :ref:`config_http_conn_man_headers_x-envoy-internal`, and - // :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information. - google.protobuf.BoolValue use_remote_address = 14 - [(udpa.annotations.security).configure_for_untrusted_downstream = true]; - - // The number of additional ingress proxy hops from the right side of the - // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when - // determining the origin client's IP address. The default is zero if this option - // is not specified. See the documentation for - // :ref:`config_http_conn_man_headers_x-forwarded-for` for more information. - // - // .. note:: - // This field is deprecated and instead :ref:`original_ip_detection_extensions - // ` - // should be used to configure the :ref:`xff extension ` - // to configure IP detection using the :ref:`config_http_conn_man_headers_x-forwarded-for` header. To replace - // this field use a config like the following: - // - // .. code-block:: yaml - // - // original_ip_detection_extensions: - // - name: envoy.http.original_ip_detection.xff - // typed_config: - // "@type": type.googleapis.com/envoy.extensions.http.original_ip_detection.xff.v3.XffConfig - // xff_num_trusted_hops: 1 - // - uint32 hidden_envoy_deprecated_xff_num_trusted_hops = 19 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // The configuration for the original IP detection extensions. 
- // - // When configured the extensions will be called along with the request headers - // and information about the downstream connection, such as the directly connected address. - // Each extension will then use these parameters to decide the request's effective remote address. - // If an extension fails to detect the original IP address and isn't configured to reject - // the request, the HCM will try the remaining extensions until one succeeds or rejects - // the request. If the request isn't rejected nor any extension succeeds, the HCM will - // fallback to using the remote address. - // - // [#extension-category: envoy.http.original_ip_detection] - repeated config.core.v4alpha.TypedExtensionConfig original_ip_detection_extensions = 46; - - // Configures what network addresses are considered internal for stats and header sanitation - // purposes. If unspecified, only RFC1918 IP addresses will be considered internal. - // See the documentation for :ref:`config_http_conn_man_headers_x-envoy-internal` for more - // information about internal/external addresses. - InternalAddressConfig internal_address_config = 25; - - // If set, Envoy will not append the remote address to the - // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. This may be used in - // conjunction with HTTP filters that explicitly manipulate XFF after the HTTP connection manager - // has mutated the request headers. While :ref:`use_remote_address - // ` - // will also suppress XFF addition, it has consequences for logging and other - // Envoy uses of the remote address, so *skip_xff_append* should be used - // when only an elision of XFF addition is intended. - bool skip_xff_append = 21; - - // Via header value to append to request and response headers. If this is - // empty, no via header will be appended. 
- string via = 22 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Whether the connection manager will generate the :ref:`x-request-id - // ` header if it does not exist. This defaults to - // true. Generating a random UUID4 is expensive so in high throughput scenarios where this feature - // is not desired it can be disabled. - google.protobuf.BoolValue generate_request_id = 15; - - // Whether the connection manager will keep the :ref:`x-request-id - // ` header if passed for a request that is edge - // (Edge request is the request from external clients to front Envoy) and not reset it, which - // is the current Envoy behaviour. This defaults to false. - bool preserve_external_request_id = 32; - - // If set, Envoy will always set :ref:`x-request-id ` header in response. - // If this is false or not set, the request ID is returned in responses only if tracing is forced using - // :ref:`x-envoy-force-trace ` header. - bool always_set_request_id_in_response = 37; - - // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP - // header. - ForwardClientCertDetails forward_client_cert_details = 16 - [(validate.rules).enum = {defined_only: true}]; - - // This field is valid only when :ref:`forward_client_cert_details - // ` - // is APPEND_FORWARD or SANITIZE_SET and the client connection is mTLS. It specifies the fields in - // the client certificate to be forwarded. Note that in the - // :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header, *Hash* is always set, and - // *By* is always set when the client certificate presents the URI type Subject Alternative Name - // value. - SetCurrentClientCertDetails set_current_client_cert_details = 17; - - // If proxy_100_continue is true, Envoy will proxy incoming "Expect: - // 100-continue" headers upstream, and forward "100 Continue" responses - // downstream. 
If this is false or not set, Envoy will instead strip the - // "Expect: 100-continue" header, and send a "100 Continue" response itself. - bool proxy_100_continue = 18; - - // If - // :ref:`use_remote_address - // ` - // is true and represent_ipv4_remote_address_as_ipv4_mapped_ipv6 is true and the remote address is - // an IPv4 address, the address will be mapped to IPv6 before it is appended to *x-forwarded-for*. - // This is useful for testing compatibility of upstream services that parse the header value. For - // example, 50.0.0.1 is represented as ::FFFF:50.0.0.1. See `IPv4-Mapped IPv6 Addresses - // `_ for details. This will also affect the - // :ref:`config_http_conn_man_headers_x-envoy-external-address` header. See - // :ref:`http_connection_manager.represent_ipv4_remote_address_as_ipv4_mapped_ipv6 - // ` for runtime - // control. - // [#not-implemented-hide:] - bool represent_ipv4_remote_address_as_ipv4_mapped_ipv6 = 20; - - repeated UpgradeConfig upgrade_configs = 23; - - // Should paths be normalized according to RFC 3986 before any processing of - // requests by HTTP filters or routing? This affects the upstream *:path* header - // as well. For paths that fail this check, Envoy will respond with 400 to - // paths that are malformed. This defaults to false currently but will default - // true in the future. When not specified, this value may be overridden by the - // runtime variable - // :ref:`http_connection_manager.normalize_path`. - // See `Normalization and Comparison `_ - // for details of normalization. - // Note that Envoy does not perform - // `case normalization `_ - google.protobuf.BoolValue normalize_path = 30; - - // Determines if adjacent slashes in the path are merged into one before any processing of - // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without - // setting this option, incoming requests with path `//dir///file` will not match against route - // with `prefix` match set to `/dir`. 
Defaults to `false`. Note that slash merging is not part of - // `HTTP spec `_ and is provided for convenience. - bool merge_slashes = 33; - - // Action to take when request URL path contains escaped slash sequences (%2F, %2f, %5C and %5c). - // The default value can be overridden by the :ref:`http_connection_manager.path_with_escaped_slashes_action` - // runtime variable. - // The :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling` runtime - // variable can be used to apply the action to a portion of all requests. - PathWithEscapedSlashesAction path_with_escaped_slashes_action = 45; - - // The configuration of the request ID extension. This includes operations such as - // generation, validation, and associated tracing operations. If empty, the - // :ref:`UuidRequestIdConfig ` - // default extension is used with default parameters. See the documentation for that extension - // for details on what it does. Customizing the configuration for the default extension can be - // achieved by configuring it explicitly here. For example, to disable trace reason packing, - // the following configuration can be used: - // - // .. validated-code-block:: yaml - // :type-name: envoy.extensions.filters.network.http_connection_manager.v3.RequestIDExtension - // - // typed_config: - // "@type": type.googleapis.com/envoy.extensions.request_id.uuid.v3.UuidRequestIdConfig - // pack_trace_reason: false - // - // [#extension-category: envoy.request_id] - RequestIDExtension request_id_extension = 36; - - // The configuration to customize local reply returned by Envoy. It can customize status code, - // body text and response content type. If not specified, status code and text body are hard - // coded in Envoy, the response content type is plain text. - LocalReplyConfig local_reply_config = 38; - - oneof strip_port_mode { - // Determines if the port part should be removed from host/authority header before any processing - // of request by HTTP filters or routing. 
The port would be removed only if it is equal to the :ref:`listener's` - // local port. This affects the upstream host header unless the method is - // CONNECT in which case if no filter adds a port the original port will be restored before headers are - // sent upstream. - // Without setting this option, incoming requests with host `example:443` will not match against - // route with :ref:`domains` match set to `example`. Defaults to `false`. Note that port removal is not part - // of `HTTP spec `_ and is provided for convenience. - // Only one of `strip_matching_host_port` or `strip_any_host_port` can be set. - bool strip_matching_host_port = 39; - - // Determines if the port part should be removed from host/authority header before any processing - // of request by HTTP filters or routing. - // This affects the upstream host header unless the method is CONNECT in - // which case if no filter adds a port the original port will be restored before headers are sent upstream. - // Without setting this option, incoming requests with host `example:443` will not match against - // route with :ref:`domains` match set to `example`. Defaults to `false`. Note that port removal is not part - // of `HTTP spec `_ and is provided for convenience. - // Only one of `strip_matching_host_port` or `strip_any_host_port` can be set. - bool strip_any_host_port = 42; - } - - // Governs Envoy's behavior when receiving invalid HTTP from downstream. - // If this option is false (default), Envoy will err on the conservative side handling HTTP - // errors, terminating both HTTP/1.1 and HTTP/2 connections when receiving an invalid request. 
- // If this option is set to true, Envoy will be more permissive, only resetting the invalid - // stream in the case of HTTP/2 and leaving the connection open where possible (if the entire - // request is read for HTTP/1.1) - // In general this should be true for deployments receiving trusted traffic (L2 Envoys, - // company-internal mesh) and false when receiving untrusted traffic (edge deployments). - // - // If different behaviors for invalid_http_message for HTTP/1 and HTTP/2 are - // desired, one should use the new HTTP/1 option :ref:`override_stream_error_on_invalid_http_message - // ` or the new HTTP/2 option - // :ref:`override_stream_error_on_invalid_http_message - // ` - // *not* the deprecated but similarly named :ref:`stream_error_on_invalid_http_messaging - // ` - google.protobuf.BoolValue stream_error_on_invalid_http_message = 40; - - // [#not-implemented-hide:] Path normalization configuration. This includes - // configurations for transformations (e.g. RFC 3986 normalization or merge - // adjacent slashes) and the policy to apply them. The policy determines - // whether transformations affect the forwarded *:path* header. RFC 3986 path - // normalization is enabled by default and the default policy is that the - // normalized header will be forwarded. See :ref:`PathNormalizationOptions - // ` - // for details. - PathNormalizationOptions path_normalization_options = 43; - - // Determines if trailing dot of the host should be removed from host/authority header before any - // processing of request by HTTP filters or routing. - // This affects the upstream host header. - // Without setting this option, incoming requests with host `example.com.` will not match against - // route with :ref:`domains` match set to `example.com`. Defaults to `false`. 
- // When the incoming request contains a host/authority header that includes a port number, - // setting this option will strip a trailing dot, if present, from the host section, - // leaving the port as is (e.g. host value `example.com.:443` will be updated to `example.com:443`). - bool strip_trailing_host_dot = 47; -} - -// The configuration to customize local reply returned by Envoy. -message LocalReplyConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.LocalReplyConfig"; - - // Configuration of list of mappers which allows to filter and change local response. - // The mappers will be checked by the specified order until one is matched. - repeated ResponseMapper mappers = 1; - - // The configuration to form response body from the :ref:`command operators ` - // and to specify response content type as one of: plain/text or application/json. - // - // Example one: "plain/text" ``body_format``. - // - // .. validated-code-block:: yaml - // :type-name: envoy.config.core.v3.SubstitutionFormatString - // - // text_format: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" - // - // The following response body in "plain/text" format will be generated for a request with - // local reply body of "upstream connection error", response_code=503 and path=/foo. - // - // .. code-block:: text - // - // upstream connect error:503:path=/foo - // - // Example two: "application/json" ``body_format``. - // - // .. validated-code-block:: yaml - // :type-name: envoy.config.core.v3.SubstitutionFormatString - // - // json_format: - // status: "%RESPONSE_CODE%" - // message: "%LOCAL_REPLY_BODY%" - // path: "%REQ(:path)%" - // - // The following response body in "application/json" format would be generated for a request with - // local reply body of "upstream connection error", response_code=503 and path=/foo. - // - // .. 
code-block:: json - // - // { - // "status": 503, - // "message": "upstream connection error", - // "path": "/foo" - // } - // - config.core.v4alpha.SubstitutionFormatString body_format = 2; -} - -// The configuration to filter and change local response. -// [#next-free-field: 6] -message ResponseMapper { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper"; - - // Filter to determine if this mapper should apply. - config.accesslog.v4alpha.AccessLogFilter filter = 1 [(validate.rules).message = {required: true}]; - - // The new response status code if specified. - google.protobuf.UInt32Value status_code = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; - - // The new local reply body text if specified. It will be used in the `%LOCAL_REPLY_BODY%` - // command operator in the `body_format`. - config.core.v4alpha.DataSource body = 3; - - // A per mapper `body_format` to override the :ref:`body_format `. - // It will be used when this mapper is matched. - config.core.v4alpha.SubstitutionFormatString body_format_override = 4; - - // HTTP headers to add to a local reply. This allows the response mapper to append, to add - // or to override headers of any local reply before it is sent to a downstream client. - repeated config.core.v4alpha.HeaderValueOption headers_to_add = 5 - [(validate.rules).repeated = {max_items: 1000}]; -} - -message Rds { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.Rds"; - - // Configuration source specifier for RDS. - config.core.v4alpha.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; - - // The name of the route configuration. This name will be passed to the RDS - // API. This allows an Envoy configuration with multiple HTTP listeners (and - // associated HTTP connection manager filters) to use different route - // configurations. 
- string route_config_name = 2; -} - -// This message is used to work around the limitations with 'oneof' and repeated fields. -message ScopedRouteConfigurationsList { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.ScopedRouteConfigurationsList"; - - repeated config.route.v4alpha.ScopedRouteConfiguration scoped_route_configurations = 1 - [(validate.rules).repeated = {min_items: 1}]; -} - -// [#next-free-field: 6] -message ScopedRoutes { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes"; - - // Specifies the mechanism for constructing "scope keys" based on HTTP request attributes. These - // keys are matched against a set of :ref:`Key` - // objects assembled from :ref:`ScopedRouteConfiguration` - // messages distributed via SRDS (the Scoped Route Discovery Service) or assigned statically via - // :ref:`scoped_route_configurations_list`. - // - // Upon receiving a request's headers, the Router will build a key using the algorithm specified - // by this message. This key will be used to look up the routing table (i.e., the - // :ref:`RouteConfiguration`) to use for the request. - message ScopeKeyBuilder { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder"; - - // Specifies the mechanism for constructing key fragments which are composed into scope keys. - message FragmentBuilder { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes." - "ScopeKeyBuilder.FragmentBuilder"; - - // Specifies how the value of a header should be extracted. - // The following example maps the structure of a header to the fields in this message. - // - // .. 
code:: - // - // <0> <1> <-- index - // X-Header: a=b;c=d - // | || | - // | || \----> - // | || - // | |\----> - // | | - // | \----> - // | - // \----> - // - // Each 'a=b' key-value pair constitutes an 'element' of the header field. - message HeaderValueExtractor { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes." - "ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor"; - - // Specifies a header field's key value pair to match on. - message KvElement { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes." - "ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor.KvElement"; - - // The separator between key and value (e.g., '=' separates 'k=v;...'). - // If an element is an empty string, the element is ignored. - // If an element contains no separator, the whole element is parsed as key and the - // fragment value is an empty string. - // If there are multiple values for a matched key, the first value is returned. - string separator = 1 [(validate.rules).string = {min_len: 1}]; - - // The key to match on. - string key = 2 [(validate.rules).string = {min_len: 1}]; - } - - // The name of the header field to extract the value from. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // The element separator (e.g., ';' separates 'a;b;c;d'). - // Default: empty string. This causes the entirety of the header field to be extracted. - // If this field is set to an empty string and 'index' is used in the oneof below, 'index' - // must be set to 0. - string element_separator = 2; - - oneof extract_type { - // Specifies the zero based index of the element to extract. 
- // Note Envoy concatenates multiple values of the same header key into a comma separated - // string, the splitting always happens after the concatenation. - uint32 index = 3; - - // Specifies the key value pair to extract the value from. - KvElement element = 4; - } - } - - oneof type { - option (validate.required) = true; - - // Specifies how a header field's value should be extracted. - HeaderValueExtractor header_value_extractor = 1; - } - } - - // The final(built) scope key consists of the ordered union of these fragments, which are compared in order with the - // fragments of a :ref:`ScopedRouteConfiguration`. - // A missing fragment during comparison will make the key invalid, i.e., the computed key doesn't match any key. - repeated FragmentBuilder fragments = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - // The name assigned to the scoped routing configuration. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // The algorithm to use for constructing a scope key for each request. - ScopeKeyBuilder scope_key_builder = 2 [(validate.rules).message = {required: true}]; - - // Configuration source specifier for RDS. - // This config source is used to subscribe to RouteConfiguration resources specified in - // ScopedRouteConfiguration messages. - config.core.v4alpha.ConfigSource rds_config_source = 3 - [(validate.rules).message = {required: true}]; - - oneof config_specifier { - option (validate.required) = true; - - // The set of routing scopes corresponding to the HCM. A scope is assigned to a request by - // matching a key constructed from the request's attributes according to the algorithm specified - // by the - // :ref:`ScopeKeyBuilder` - // in this message. - ScopedRouteConfigurationsList scoped_route_configurations_list = 4; - - // The set of routing scopes associated with the HCM will be dynamically loaded via the SRDS - // API. 
A scope is assigned to a request by matching a key constructed from the request's - // attributes according to the algorithm specified by the - // :ref:`ScopeKeyBuilder` - // in this message. - ScopedRds scoped_rds = 5; - } -} - -message ScopedRds { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.ScopedRds"; - - // Configuration source specifier for scoped RDS. - config.core.v4alpha.ConfigSource scoped_rds_config_source = 1 - [(validate.rules).message = {required: true}]; - - // xdstp:// resource locator for scoped RDS collection. - // [#not-implemented-hide:] - string srds_resources_locator = 2; -} - -// [#next-free-field: 7] -message HttpFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter"; - - reserved 3, 2; - - reserved "config"; - - // The name of the filter configuration. The name is used as a fallback to - // select an extension if the type of the configuration proto is not - // sufficient. It also serves as a resource name in ExtensionConfigDS. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - oneof config_type { - // Filter specific configuration which depends on the filter being instantiated. See the supported - // filters for further documentation. - // - // To support configuring a :ref:`match tree `, use an - // :ref:`ExtensionWithMatcher ` - // with the desired HTTP filter. - // [#extension-category: envoy.filters.http] - google.protobuf.Any typed_config = 4; - - // Configuration source specifier for an extension configuration discovery service. - // In case of a failure and without the default configuration, the HTTP listener responds with code 500. - // Extension configs delivered through this mechanism are not expected to require warming (see https://github.com/envoyproxy/envoy/issues/12061). 
- // - // To support configuring a :ref:`match tree `, use an - // :ref:`ExtensionWithMatcher ` - // with the desired HTTP filter. This works for both the default filter configuration as well - // as for filters provided via the API. - config.core.v4alpha.ExtensionConfigSource config_discovery = 5; - } - - // If true, clients that do not support this filter may ignore the - // filter but otherwise accept the config. - // Otherwise, clients that do not support this filter must reject the config. - // This is also same with typed per filter config. - bool is_optional = 6; -} - -message RequestIDExtension { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3.RequestIDExtension"; - - // Request ID extension specific configuration. - google.protobuf.Any typed_config = 1; -} - -// [#protodoc-title: Envoy Mobile HTTP connection manager] -// HTTP connection manager for use in Envoy mobile. -// [#extension: envoy.filters.network.envoy_mobile_http_connection_manager] -message EnvoyMobileHttpConnectionManager { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.http_connection_manager.v3." - "EnvoyMobileHttpConnectionManager"; - - // The configuration for the underlying HttpConnectionManager which will be - // instantiated for Envoy mobile. - HttpConnectionManager config = 1; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/ratelimit/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/network/ratelimit/v4alpha/BUILD deleted file mode 100644 index d9d0ca109526e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/ratelimit/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/ratelimit/v4alpha:pkg", - "//envoy/extensions/common/ratelimit/v3:pkg", - "//envoy/extensions/filters/network/ratelimit/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/ratelimit/v4alpha/rate_limit.proto b/generated_api_shadow/envoy/extensions/filters/network/ratelimit/v4alpha/rate_limit.proto deleted file mode 100644 index b53cb3bcc1d09..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/ratelimit/v4alpha/rate_limit.proto +++ /dev/null @@ -1,53 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.ratelimit.v4alpha; - -import "envoy/config/ratelimit/v4alpha/rls.proto"; -import "envoy/extensions/common/ratelimit/v3/ratelimit.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.ratelimit.v4alpha"; -option java_outer_classname = "RateLimitProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Rate limit] -// Rate limit :ref:`configuration overview `. -// [#extension: envoy.filters.network.ratelimit] - -// [#next-free-field: 7] -message RateLimit { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.ratelimit.v3.RateLimit"; - - // The prefix to use when emitting :ref:`statistics `. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // The rate limit domain to use in the rate limit service request. 
- string domain = 2 [(validate.rules).string = {min_len: 1}]; - - // The rate limit descriptor list to use in the rate limit service request. - repeated common.ratelimit.v3.RateLimitDescriptor descriptors = 3 - [(validate.rules).repeated = {min_items: 1}]; - - // The timeout in milliseconds for the rate limit service RPC. If not - // set, this defaults to 20ms. - google.protobuf.Duration timeout = 4; - - // The filter's behaviour in case the rate limiting service does - // not respond back. When it is set to true, Envoy will not allow traffic in case of - // communication failure between rate limiting service and the proxy. - // Defaults to false. - bool failure_mode_deny = 5; - - // Configuration for an external rate limit service provider. If not - // specified, any calls to the rate limit service will immediately return - // success. - config.ratelimit.v4alpha.RateLimitServiceConfig rate_limit_service = 6 - [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/rbac/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/network/rbac/v4alpha/BUILD deleted file mode 100644 index 27418dd3299e4..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/rbac/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/rbac/v4alpha:pkg", - "//envoy/extensions/filters/network/rbac/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/rbac/v4alpha/rbac.proto b/generated_api_shadow/envoy/extensions/filters/network/rbac/v4alpha/rbac.proto deleted file mode 100644 index 3512bae2d2aba..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/rbac/v4alpha/rbac.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.rbac.v4alpha; - -import "envoy/config/rbac/v4alpha/rbac.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.rbac.v4alpha"; -option java_outer_classname = "RbacProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: RBAC] -// Role-Based Access Control :ref:`configuration overview `. -// [#extension: envoy.filters.network.rbac] - -// RBAC network filter config. -// -// Header should not be used in rules/shadow_rules in RBAC network filter as -// this information is only available in :ref:`RBAC http filter `. -// [#next-free-field: 6] -message RBAC { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.rbac.v3.RBAC"; - - enum EnforcementType { - // Apply RBAC policies when the first byte of data arrives on the connection. - ONE_TIME_ON_FIRST_BYTE = 0; - - // Continuously apply RBAC policies as data arrives. Use this mode when - // using RBAC with message oriented protocols such as Mongo, MySQL, Kafka, - // etc. 
when the protocol decoders emit dynamic metadata such as the - // resources being accessed and the operations on the resources. - CONTINUOUS = 1; - } - - // Specify the RBAC rules to be applied globally. - // If absent, no enforcing RBAC policy will be applied. - // If present and empty, DENY. - config.rbac.v4alpha.RBAC rules = 1; - - // Shadow rules are not enforced by the filter but will emit stats and logs - // and can be used for rule testing. - // If absent, no shadow RBAC policy will be applied. - config.rbac.v4alpha.RBAC shadow_rules = 2; - - // If specified, shadow rules will emit stats with the given prefix. - // This is useful to distinguish the stat when there are more than 1 RBAC filter configured with - // shadow rules. - string shadow_rules_stat_prefix = 5; - - // The prefix to use when emitting statistics. - string stat_prefix = 3 [(validate.rules).string = {min_len: 1}]; - - // RBAC enforcement strategy. By default RBAC will be enforced only once - // when the first byte of data arrives from the downstream. When used in - // conjunction with filters that emit dynamic metadata after decoding - // every payload (e.g., Mongo, MySQL, Kafka) set the enforcement type to - // CONTINUOUS to enforce RBAC policies on every message boundary. - EnforcementType enforcement_type = 4; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/BUILD deleted file mode 100644 index 06009f5f397fa..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/route/v4alpha:pkg", - "//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg", - "//envoy/type/matcher/v4alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/rocketmq_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/rocketmq_proxy.proto deleted file mode 100644 index 45a71da2f8dd3..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/rocketmq_proxy.proto +++ /dev/null @@ -1,38 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.rocketmq_proxy.v4alpha; - -import "envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v4alpha"; -option java_outer_classname = "RocketmqProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: RocketMQ Proxy] -// RocketMQ Proxy :ref:`configuration overview `. -// [#extension: envoy.filters.network.rocketmq_proxy] - -message RocketmqProxy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.rocketmq_proxy.v3.RocketmqProxy"; - - // The human readable prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // The route table for the connection manager is specified in this property. 
- RouteConfiguration route_config = 2; - - // The largest duration transient object expected to live, more than 10s is recommended. - google.protobuf.Duration transient_object_life_span = 3; - - // If develop_mode is enabled, this proxy plugin may work without dedicated traffic intercepting - // facility without considering backward compatibility of exiting RocketMQ client SDK. - bool develop_mode = 4; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto b/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto deleted file mode 100644 index 0925afef833d4..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto +++ /dev/null @@ -1,67 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.rocketmq_proxy.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/route/v4alpha/route_components.proto"; -import "envoy/type/matcher/v4alpha/string.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v4alpha"; -option java_outer_classname = "RouteProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Rocketmq Proxy Route Configuration] -// Rocketmq Proxy :ref:`configuration overview `. - -message RouteConfiguration { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.rocketmq_proxy.v3.RouteConfiguration"; - - // The name of the route configuration. - string name = 1; - - // The list of routes that will be matched, in order, against incoming requests. The first route - // that matches will be used. 
- repeated Route routes = 2; -} - -message Route { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.rocketmq_proxy.v3.Route"; - - // Route matching parameters. - RouteMatch match = 1 [(validate.rules).message = {required: true}]; - - // Route request to some upstream cluster. - RouteAction route = 2 [(validate.rules).message = {required: true}]; -} - -message RouteMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.rocketmq_proxy.v3.RouteMatch"; - - // The name of the topic. - type.matcher.v4alpha.StringMatcher topic = 1 [(validate.rules).message = {required: true}]; - - // Specifies a set of headers that the route should match on. The router will check the request’s - // headers against all the specified headers in the route config. A match will happen if all the - // headers in the route are present in the request with the same values (or based on presence if - // the value field is not in the config). - repeated config.route.v4alpha.HeaderMatcher headers = 2; -} - -message RouteAction { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.rocketmq_proxy.v3.RouteAction"; - - // Indicates the upstream cluster to which the request should be routed. - string cluster = 1 [(validate.rules).string = {min_len: 1}]; - - // Optional endpoint metadata match criteria used by the subset load balancer. - config.core.v4alpha.Metadata metadata_match = 2; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v4alpha/BUILD deleted file mode 100644 index 465ea4ff28449..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/common/dynamic_forward_proxy/v4alpha:pkg", - "//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v4alpha/sni_dynamic_forward_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v4alpha/sni_dynamic_forward_proxy.proto deleted file mode 100644 index de2947fcba9ec..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v4alpha/sni_dynamic_forward_proxy.proto +++ /dev/null @@ -1,40 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.sni_dynamic_forward_proxy.v4alpha; - -import "envoy/extensions/common/dynamic_forward_proxy/v4alpha/dns_cache.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.sni_dynamic_forward_proxy.v4alpha"; -option java_outer_classname = "SniDynamicForwardProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: SNI dynamic forward proxy] - -// Configuration for the SNI-based dynamic forward proxy filter. See the -// :ref:`architecture overview ` for -// more information. Note this filter must be configured along with -// :ref:`TLS inspector listener filter ` -// to work. 
-// [#extension: envoy.filters.network.sni_dynamic_forward_proxy] -message FilterConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3alpha.FilterConfig"; - - // The DNS cache configuration that the filter will attach to. Note this - // configuration must match that of associated :ref:`dynamic forward proxy - // cluster configuration - // `. - common.dynamic_forward_proxy.v4alpha.DnsCacheConfig dns_cache_config = 1 - [(validate.rules).message = {required: true}]; - - oneof port_specifier { - // The port number to connect to the upstream. - uint32 port_value = 2 [(validate.rules).uint32 = {lte: 65535 gt: 0}]; - } -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v4alpha/BUILD deleted file mode 100644 index 1b359dc7be526..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v4alpha/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/accesslog/v4alpha:pkg", - "//envoy/config/core/v4alpha:pkg", - "//envoy/extensions/filters/network/tcp_proxy/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v4alpha/tcp_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v4alpha/tcp_proxy.proto deleted file mode 100644 index 95f2c26c888ca..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v4alpha/tcp_proxy.proto +++ /dev/null @@ -1,154 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.tcp_proxy.v4alpha; - -import "envoy/config/accesslog/v4alpha/accesslog.proto"; -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/type/v3/hash_policy.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.tcp_proxy.v4alpha"; -option java_outer_classname = "TcpProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: TCP Proxy] -// TCP Proxy :ref:`configuration overview `. -// [#extension: envoy.filters.network.tcp_proxy] - -// [#next-free-field: 14] -message TcpProxy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy"; - - // Allows for specification of multiple upstream clusters along with weights - // that indicate the percentage of traffic to be forwarded to each cluster. - // The router selects an upstream cluster based on these weights. 
- message WeightedCluster { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy.WeightedCluster"; - - message ClusterWeight { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy.WeightedCluster.ClusterWeight"; - - // Name of the upstream cluster. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // When a request matches the route, the choice of an upstream cluster is - // determined by its weight. The sum of weights across all entries in the - // clusters array determines the total weight. - uint32 weight = 2 [(validate.rules).uint32 = {gte: 1}]; - - // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints - // in the upstream cluster with metadata matching what is set in this field will be considered - // for load balancing. Note that this will be merged with what's provided in - // :ref:`TcpProxy.metadata_match - // `, with values - // here taking precedence. The filter name should be specified as *envoy.lb*. - config.core.v4alpha.Metadata metadata_match = 3; - } - - // Specifies one or more upstream clusters associated with the route. - repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - // Configuration for tunneling TCP over other transports or application layers. - // Tunneling is supported over both HTTP/1.1 and HTTP/2. Upstream protocol is - // determined by the cluster configuration. - message TunnelingConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy.TunnelingConfig"; - - // The hostname to send in the synthesized CONNECT headers to the upstream proxy. - string hostname = 1 [(validate.rules).string = {min_len: 1}]; - - // Use POST method instead of CONNECT method to tunnel the TCP stream. 
- // The 'protocol: bytestream' header is also NOT set for HTTP/2 to comply with the spec. - // - // The upstream proxy is expected to convert POST payload as raw TCP. - bool use_post = 2; - - // Additional request headers to upstream proxy. This is mainly used to - // trigger upstream to convert POST requests back to CONNECT requests. - // - // Neither *:-prefixed* pseudo-headers nor the Host: header can be overridden. - repeated config.core.v4alpha.HeaderValueOption headers_to_add = 3 - [(validate.rules).repeated = {max_items: 1000}]; - } - - reserved 6; - - reserved "deprecated_v1"; - - // The prefix to use when emitting :ref:`statistics - // `. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - oneof cluster_specifier { - option (validate.required) = true; - - // The upstream cluster to connect to. - string cluster = 2; - - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. - WeightedCluster weighted_clusters = 10; - } - - // Optional endpoint metadata match criteria. Only endpoints in the upstream - // cluster with metadata matching that set in metadata_match will be - // considered. The filter name should be specified as *envoy.lb*. - config.core.v4alpha.Metadata metadata_match = 9; - - // The idle timeout for connections managed by the TCP proxy filter. The idle timeout - // is defined as the period in which there are no bytes sent or received on either - // the upstream or downstream connection. If not set, the default idle timeout is 1 hour. If set - // to 0s, the timeout will be disabled. - // - // .. warning:: - // Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP - // FIN packets, etc. - google.protobuf.Duration idle_timeout = 8; - - // [#not-implemented-hide:] The idle timeout for connections managed by the TCP proxy - // filter. 
The idle timeout is defined as the period in which there is no - // active traffic. If not set, there is no idle timeout. When the idle timeout - // is reached the connection will be closed. The distinction between - // downstream_idle_timeout/upstream_idle_timeout provides a means to set - // timeout based on the last byte sent on the downstream/upstream connection. - google.protobuf.Duration downstream_idle_timeout = 3; - - // [#not-implemented-hide:] - google.protobuf.Duration upstream_idle_timeout = 4; - - // Configuration for :ref:`access logs ` - // emitted by the this tcp_proxy. - repeated config.accesslog.v4alpha.AccessLog access_log = 5; - - // The maximum number of unsuccessful connection attempts that will be made before - // giving up. If the parameter is not specified, 1 connection attempt will be made. - google.protobuf.UInt32Value max_connect_attempts = 7 [(validate.rules).uint32 = {gte: 1}]; - - // Optional configuration for TCP proxy hash policy. If hash_policy is not set, the hash-based - // load balancing algorithms will select a host randomly. Currently the number of hash policies is - // limited to 1. - repeated type.v3.HashPolicy hash_policy = 11 [(validate.rules).repeated = {max_items: 1}]; - - // If set, this configures tunneling, e.g. configuration options to tunnel TCP payload over - // HTTP CONNECT. If this message is absent, the payload will be proxied upstream as per usual. - TunnelingConfig tunneling_config = 12; - - // The maximum duration of a connection. The duration is defined as the period since a connection - // was established. If not set, there is no max duration. When max_downstream_connection_duration - // is reached the connection will be closed. Duration must be at least 1ms. 
- google.protobuf.Duration max_downstream_connection_duration = 13 - [(validate.rules).duration = {gte {nanos: 1000000}}]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v4alpha/BUILD deleted file mode 100644 index a58bc9ebda546..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/ratelimit/v4alpha:pkg", - "//envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v4alpha/rate_limit.proto b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v4alpha/rate_limit.proto deleted file mode 100644 index ed2a33290268e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v4alpha/rate_limit.proto +++ /dev/null @@ -1,56 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.thrift_proxy.filters.ratelimit.v4alpha; - -import "envoy/config/ratelimit/v4alpha/rls.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.filters.ratelimit.v4alpha"; -option java_outer_classname = "RateLimitProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// 
[#protodoc-title: Rate limit] -// Rate limit :ref:`configuration overview `. -// [#extension: envoy.filters.thrift.ratelimit] - -// [#next-free-field: 6] -message RateLimit { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.thrift_proxy.filters.ratelimit.v3.RateLimit"; - - // The rate limit domain to use in the rate limit service request. - string domain = 1 [(validate.rules).string = {min_len: 1}]; - - // Specifies the rate limit configuration stage. Each configured rate limit filter performs a - // rate limit check using descriptors configured in the - // :ref:`envoy_v3_api_msg_extensions.filters.network.thrift_proxy.v3.RouteAction` for the request. - // Only those entries with a matching stage number are used for a given filter. If not set, the - // default stage number is 0. - // - // .. note:: - // - // The filter supports a range of 0 - 10 inclusively for stage numbers. - uint32 stage = 2 [(validate.rules).uint32 = {lte: 10}]; - - // The timeout in milliseconds for the rate limit service RPC. If not - // set, this defaults to 20ms. - google.protobuf.Duration timeout = 3; - - // The filter's behaviour in case the rate limiting service does - // not respond back. When it is set to true, Envoy will not allow traffic in case of - // communication failure between rate limiting service and the proxy. - // Defaults to false. - bool failure_mode_deny = 4; - - // Configuration for an external rate limit service provider. If not - // specified, any calls to the rate limit service will immediately return - // success. 
- config.ratelimit.v4alpha.RateLimitServiceConfig rate_limit_service = 5 - [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/route.proto b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/route.proto index cf4c06ae1f19e..b79c9bc9619ea 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/route.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/route.proto @@ -81,11 +81,33 @@ message RouteMatch { repeated config.route.v3.HeaderMatcher headers = 4; } -// [#next-free-field: 7] +// [#next-free-field: 8] message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.thrift_proxy.v2alpha1.RouteAction"; + // The router is capable of shadowing traffic from one cluster to another. The current + // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to + // respond before returning the response from the primary cluster. All normal statistics are + // collected for the shadow cluster making this feature useful for testing. + // + // .. note:: + // + // Shadowing will not be triggered if the primary cluster does not exist. + message RequestMirrorPolicy { + // Specifies the cluster that requests will be mirrored to. The cluster must + // exist in the cluster manager configuration when the route configuration is loaded. + // If it disappears at runtime, the shadow request will silently be ignored. + string cluster = 1 [(validate.rules).string = {min_len: 1}]; + + // If not specified, all requests to the target cluster will be mirrored. + // + // For some fraction N/D, a random number in the range [0,D) is selected. If the + // number is <= the value of the numerator N, or if the key is not present, the default + // value, the request will be mirrored. 
+ config.core.v3.RuntimeFractionalPercent runtime_fraction = 2; + } + oneof cluster_specifier { option (validate.required) = true; @@ -123,6 +145,9 @@ message RouteAction { // Strip the service prefix from the method name, if there's a prefix. For // example, the method call Service:method would end up being just method. bool strip_service_name = 5; + + // Indicates that the route has request mirroring policies. + repeated RequestMirrorPolicy request_mirror_policies = 7; } // Allows for specification of multiple upstream clusters along with weights that indicate the diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/BUILD deleted file mode 100644 index 995c04093a7da..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/route/v4alpha:pkg", - "//envoy/extensions/filters/network/thrift_proxy/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto deleted file mode 100644 index e638e9b8a2be8..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto +++ /dev/null @@ -1,158 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.thrift_proxy.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/route/v4alpha/route_components.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.v4alpha"; -option java_outer_classname = "RouteProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Thrift Proxy Route Configuration] -// Thrift Proxy :ref:`configuration overview `. - -message RouteConfiguration { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.thrift_proxy.v3.RouteConfiguration"; - - // The name of the route configuration. Reserved for future use in asynchronous route discovery. - string name = 1; - - // The list of routes that will be matched, in order, against incoming requests. The first route - // that matches will be used. 
- repeated Route routes = 2; -} - -message Route { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.thrift_proxy.v3.Route"; - - // Route matching parameters. - RouteMatch match = 1 [(validate.rules).message = {required: true}]; - - // Route request to some upstream cluster. - RouteAction route = 2 [(validate.rules).message = {required: true}]; -} - -message RouteMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.thrift_proxy.v3.RouteMatch"; - - oneof match_specifier { - option (validate.required) = true; - - // If specified, the route must exactly match the request method name. As a special case, an - // empty string matches any request method name. - string method_name = 1; - - // If specified, the route must have the service name as the request method name prefix. As a - // special case, an empty string matches any service name. Only relevant when service - // multiplexing. - string service_name = 2; - } - - // Inverts whatever matching is done in the :ref:`method_name - // ` or - // :ref:`service_name - // ` fields. - // Cannot be combined with wildcard matching as that would result in routes never being matched. - // - // .. note:: - // - // This does not invert matching done as part of the :ref:`headers field - // ` field. To - // invert header matching, see :ref:`invert_match - // `. - bool invert = 3; - - // Specifies a set of headers that the route should match on. The router will check the request’s - // headers against all the specified headers in the route config. A match will happen if all the - // headers in the route are present in the request with the same values (or based on presence if - // the value field is not in the config). Note that this only applies for Thrift transports and/or - // protocols that support headers. 
- repeated config.route.v4alpha.HeaderMatcher headers = 4; -} - -// [#next-free-field: 7] -message RouteAction { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.thrift_proxy.v3.RouteAction"; - - oneof cluster_specifier { - option (validate.required) = true; - - // Indicates a single upstream cluster to which the request should be routed - // to. - string cluster = 1 [(validate.rules).string = {min_len: 1}]; - - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. - WeightedCluster weighted_clusters = 2; - - // Envoy will determine the cluster to route to by reading the value of the - // Thrift header named by cluster_header from the request headers. If the - // header is not found or the referenced cluster does not exist Envoy will - // respond with an unknown method exception or an internal error exception, - // respectively. - string cluster_header = 6 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; - } - - // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in - // the upstream cluster with metadata matching what is set in this field will be considered. - // Note that this will be merged with what's provided in :ref:`WeightedCluster.metadata_match - // `, - // with values there taking precedence. Keys and values should be provided under the "envoy.lb" - // metadata key. - config.core.v4alpha.Metadata metadata_match = 3; - - // Specifies a set of rate limit configurations that could be applied to the route. - // N.B. Thrift service or method name matching can be achieved by specifying a RequestHeaders - // action with the header name ":method-name". - repeated config.route.v4alpha.RateLimit rate_limits = 4; - - // Strip the service prefix from the method name, if there's a prefix. 
For - // example, the method call Service:method would end up being just method. - bool strip_service_name = 5; -} - -// Allows for specification of multiple upstream clusters along with weights that indicate the -// percentage of traffic to be forwarded to each cluster. The router selects an upstream cluster -// based on these weights. -message WeightedCluster { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.thrift_proxy.v3.WeightedCluster"; - - message ClusterWeight { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.thrift_proxy.v3.WeightedCluster.ClusterWeight"; - - // Name of the upstream cluster. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // When a request matches the route, the choice of an upstream cluster is determined by its - // weight. The sum of weights across all entries in the clusters array determines the total - // weight. - google.protobuf.UInt32Value weight = 2 [(validate.rules).uint32 = {gte: 1}]; - - // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in - // the upstream cluster with metadata matching what is set in this field, combined with what's - // provided in :ref:`RouteAction's metadata_match - // `, - // will be considered. Values here will take precedence. Keys and values should be provided - // under the "envoy.lb" metadata key. - config.core.v4alpha.Metadata metadata_match = 3; - } - - // Specifies one or more upstream clusters associated with the route. 
- repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/thrift_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/thrift_proxy.proto deleted file mode 100644 index de399582869a0..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v4alpha/thrift_proxy.proto +++ /dev/null @@ -1,140 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.thrift_proxy.v4alpha; - -import "envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.v4alpha"; -option java_outer_classname = "ThriftProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Thrift Proxy] -// Thrift Proxy :ref:`configuration overview `. -// [#extension: envoy.filters.network.thrift_proxy] - -// Thrift transport types supported by Envoy. -enum TransportType { - // For downstream connections, the Thrift proxy will attempt to determine which transport to use. - // For upstream connections, the Thrift proxy will use same transport as the downstream - // connection. - AUTO_TRANSPORT = 0; - - // The Thrift proxy will use the Thrift framed transport. - FRAMED = 1; - - // The Thrift proxy will use the Thrift unframed transport. - UNFRAMED = 2; - - // The Thrift proxy will assume the client is using the Thrift header transport. - HEADER = 3; -} - -// Thrift Protocol types supported by Envoy. -enum ProtocolType { - // For downstream connections, the Thrift proxy will attempt to determine which protocol to use. 
- // Note that the older, non-strict (or lax) binary protocol is not included in automatic protocol - // detection. For upstream connections, the Thrift proxy will use the same protocol as the - // downstream connection. - AUTO_PROTOCOL = 0; - - // The Thrift proxy will use the Thrift binary protocol. - BINARY = 1; - - // The Thrift proxy will use Thrift non-strict binary protocol. - LAX_BINARY = 2; - - // The Thrift proxy will use the Thrift compact protocol. - COMPACT = 3; - - // The Thrift proxy will use the Thrift "Twitter" protocol implemented by the finagle library. - TWITTER = 4; -} - -// [#next-free-field: 8] -message ThriftProxy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.thrift_proxy.v3.ThriftProxy"; - - // Supplies the type of transport that the Thrift proxy should use. Defaults to - // :ref:`AUTO_TRANSPORT`. - TransportType transport = 2 [(validate.rules).enum = {defined_only: true}]; - - // Supplies the type of protocol that the Thrift proxy should use. Defaults to - // :ref:`AUTO_PROTOCOL`. - ProtocolType protocol = 3 [(validate.rules).enum = {defined_only: true}]; - - // The human readable prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // The route table for the connection manager is static and is specified in this property. - RouteConfiguration route_config = 4; - - // A list of individual Thrift filters that make up the filter chain for requests made to the - // Thrift proxy. Order matters as the filters are processed sequentially. For backwards - // compatibility, if no thrift_filters are specified, a default Thrift router filter - // (`envoy.filters.thrift.router`) is used. - // [#extension-category: envoy.thrift_proxy.filters] - repeated ThriftFilter thrift_filters = 5; - - // If set to true, Envoy will try to skip decode data after metadata in the Thrift message. 
- // This mode will only work if the upstream and downstream protocols are the same and the transport - // is the same, the transport type is framed and the protocol is not Twitter. Otherwise Envoy will - // fallback to decode the data. - bool payload_passthrough = 6; - - // Optional maximum requests for a single downstream connection. If not specified, there is no limit. - google.protobuf.UInt32Value max_requests_per_connection = 7; -} - -// ThriftFilter configures a Thrift filter. -message ThriftFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.thrift_proxy.v3.ThriftFilter"; - - reserved 2; - - reserved "config"; - - // The name of the filter to instantiate. The name must match a supported - // filter. The built-in filters are: - // - // [#comment:TODO(zuercher): Auto generate the following list] - // * :ref:`envoy.filters.thrift.router ` - // * :ref:`envoy.filters.thrift.rate_limit ` - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Filter specific configuration which depends on the filter being instantiated. See the supported - // filters for further documentation. - oneof config_type { - google.protobuf.Any typed_config = 3; - } -} - -// ThriftProtocolOptions specifies Thrift upstream protocol options. This object is used in -// in -// :ref:`typed_extension_protocol_options`, -// keyed by the name `envoy.filters.network.thrift_proxy`. -message ThriftProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.network.thrift_proxy.v3.ThriftProtocolOptions"; - - // Supplies the type of transport that the Thrift proxy should use for upstream connections. - // Selecting - // :ref:`AUTO_TRANSPORT`, - // which is the default, causes the proxy to use the same transport as the downstream connection. 
- TransportType transport = 1 [(validate.rules).enum = {defined_only: true}]; - - // Supplies the type of protocol that the Thrift proxy should use for upstream connections. - // Selecting - // :ref:`AUTO_PROTOCOL`, - // which is the default, causes the proxy to use the same protocol as the downstream connection. - ProtocolType protocol = 2 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v4alpha/BUILD deleted file mode 100644 index 28c2427c4a495..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/data/dns/v4alpha:pkg", - "//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto b/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto deleted file mode 100644 index 6957e58dbb068..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto +++ /dev/null @@ -1,84 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.udp.dns_filter.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/resolver.proto"; -import "envoy/data/dns/v4alpha/dns_table.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.udp.dns_filter.v4alpha"; -option 
java_outer_classname = "DnsFilterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: DNS Filter] -// DNS Filter :ref:`configuration overview `. -// [#extension: envoy.filters.udp_listener.dns_filter] - -// Configuration for the DNS filter. -message DnsFilterConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig"; - - // This message contains the configuration for the DNS Filter operating - // in a server context. This message will contain the virtual hosts and - // associated addresses with which Envoy will respond to queries - message ServerContextConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig.ServerContextConfig"; - - oneof config_source { - option (validate.required) = true; - - // Load the configuration specified from the control plane - data.dns.v4alpha.DnsTable inline_dns_table = 1; - - // Seed the filter configuration from an external path. This source - // is a yaml formatted file that contains the DnsTable driving Envoy's - // responses to DNS queries - config.core.v4alpha.DataSource external_dns_table = 2; - } - } - - // This message contains the configuration for the DNS Filter operating - // in a client context. This message will contain the timeouts, retry, - // and forwarding configuration for Envoy to make DNS requests to other - // resolvers - message ClientContextConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig.ClientContextConfig"; - - // Sets the maximum time we will wait for the upstream query to complete - // We allow 5s for the upstream resolution to complete, so the minimum - // value here is 1. 
Note that the total latency for a failed query is the - // number of retries multiplied by the resolver_timeout. - google.protobuf.Duration resolver_timeout = 1 [(validate.rules).duration = {gte {seconds: 1}}]; - - // DNS resolution configuration which includes the underlying dns resolver addresses and options. - config.core.v4alpha.DnsResolutionConfig dns_resolution_config = 2; - - // Controls how many outstanding external lookup contexts the filter tracks. - // The context structure allows the filter to respond to every query even if the external - // resolution times out or is otherwise unsuccessful - uint64 max_pending_lookups = 3 [(validate.rules).uint64 = {gte: 1}]; - } - - // The stat prefix used when emitting DNS filter statistics - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // Server context configuration contains the data that the filter uses to respond - // to DNS requests. - ServerContextConfig server_config = 2; - - // Client context configuration controls Envoy's behavior when it must use external - // resolvers to answer a query. This object is optional and if omitted instructs - // the filter to resolve queries from the data in the server_config - ClientContextConfig client_config = 3; -} diff --git a/generated_api_shadow/envoy/extensions/formatter/metadata/v3/BUILD b/generated_api_shadow/envoy/extensions/formatter/metadata/v3/BUILD new file mode 100644 index 0000000000000..ee92fb652582e --- /dev/null +++ b/generated_api_shadow/envoy/extensions/formatter/metadata/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/envoy/extensions/formatter/metadata/v3/metadata.proto b/generated_api_shadow/envoy/extensions/formatter/metadata/v3/metadata.proto new file mode 100644 index 0000000000000..9b110a4893812 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/formatter/metadata/v3/metadata.proto @@ -0,0 +1,56 @@ +syntax = "proto3"; + +package envoy.extensions.formatter.metadata.v3; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.formatter.metadata.v3"; +option java_outer_classname = "MetadataProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Formatter extension for printing various types of metadata] +// [#extension: envoy.formatter.metadata] + +// Metadata formatter extension implements METADATA command operator that +// prints all types of metadata. The first parameter taken by METADATA operator defines +// type of metadata. The following types of metadata are supported (case sensitive): +// +// * DYNAMIC +// * CLUSTER +// * ROUTE +// +// See :ref:`here ` for more information on access log configuration. + +// %METADATA(TYPE:NAMESPACE:KEY):Z% +// :ref:`Metadata ` info, +// where TYPE is type of metadata (see above for supported types), +// NAMESPACE is the filter namespace used when setting the metadata, KEY is an optional +// lookup up key in the namespace with the option of specifying nested keys separated by ':', +// and Z is an optional parameter denoting string truncation up to Z characters long. +// The data will be logged as a JSON string. 
For example, for the following ROUTE metadata: +// +// ``com.test.my_filter: {"test_key": "foo", "test_object": {"inner_key": "bar"}}`` +// +// * %METADATA(ROUTE:com.test.my_filter)% will log: ``{"test_key": "foo", "test_object": {"inner_key": "bar"}}`` +// * %METADATA(ROUTE:com.test.my_filter:test_key)% will log: ``foo`` +// * %METADATA(ROUTE:com.test.my_filter:test_object)% will log: ``{"inner_key": "bar"}`` +// * %METADATA(ROUTE:com.test.my_filter:test_object:inner_key)% will log: ``bar`` +// * %METADATA(ROUTE:com.unknown_filter)% will log: ``-`` +// * %METADATA(ROUTE:com.test.my_filter:unknown_key)% will log: ``-`` +// * %METADATA(ROUTE:com.test.my_filter):25% will log (truncation at 25 characters): ``{"test_key": "foo", "test`` +// +// .. note:: +// +// For typed JSON logs, this operator renders a single value with string, numeric, or boolean type +// when the referenced key is a simple value. If the referenced key is a struct or list value, a +// JSON struct or list is rendered. Structs and lists may be nested. In any event, the maximum +// length is ignored. +// +// .. note:: +// +// METADATA(DYNAMIC:NAMESPACE:KEY):Z is equivalent to :ref:`DYNAMIC_METADATA(NAMESPACE:KEY):Z` +// METADATA(CLUSTER:NAMESPACE:KEY):Z is equivalent to :ref:`CLUSTER_METADATA(NAMESPACE:KEY):Z` + +message Metadata { +} diff --git a/generated_api_shadow/envoy/extensions/key_value/file_based/v3/BUILD b/generated_api_shadow/envoy/extensions/key_value/file_based/v3/BUILD new file mode 100644 index 0000000000000..ee92fb652582e --- /dev/null +++ b/generated_api_shadow/envoy/extensions/key_value/file_based/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/envoy/extensions/key_value/file_based/v3/config.proto b/generated_api_shadow/envoy/extensions/key_value/file_based/v3/config.proto new file mode 100644 index 0000000000000..0eff4feb8f941 --- /dev/null +++ b/generated_api_shadow/envoy/extensions/key_value/file_based/v3/config.proto @@ -0,0 +1,27 @@ +syntax = "proto3"; + +package envoy.extensions.key_value.file_based.v3; + +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.key_value.file_based.v3"; +option java_outer_classname = "ConfigProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: File Based Key Value Store storage plugin] + +// [#alpha:] +// [#extension: envoy.key_value.file_based] +// This is configuration to flush a key value store out to disk. +message FileBasedKeyValueStoreConfig { + // The filename to read the keys and values from, and write the keys and + // values to. + string filename = 1 [(validate.rules).string = {min_len: 1}]; + + // The interval at which the key value store should be flushed to the file. 
+ google.protobuf.Duration flush_interval = 2; +} diff --git a/generated_api_shadow/envoy/extensions/tracers/datadog/v4alpha/datadog.proto b/generated_api_shadow/envoy/extensions/tracers/datadog/v4alpha/datadog.proto deleted file mode 100644 index f41c8added210..0000000000000 --- a/generated_api_shadow/envoy/extensions/tracers/datadog/v4alpha/datadog.proto +++ /dev/null @@ -1,27 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.tracers.datadog.v4alpha; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.tracers.datadog.v4alpha"; -option java_outer_classname = "DatadogProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Datadog tracer] - -// Configuration for the Datadog tracer. -// [#extension: envoy.tracers.datadog] -message DatadogConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.trace.v3.DatadogConfig"; - - // The cluster to use for submitting traces to the Datadog agent. - string collector_cluster = 1 [(validate.rules).string = {min_len: 1}]; - - // The name used for the service when traces are generated by envoy. - string service_name = 2 [(validate.rules).string = {min_len: 1}]; -} diff --git a/generated_api_shadow/envoy/extensions/tracers/dynamic_ot/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/tracers/dynamic_ot/v4alpha/BUILD deleted file mode 100644 index d500cc41da1fe..0000000000000 --- a/generated_api_shadow/envoy/extensions/tracers/dynamic_ot/v4alpha/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/trace/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/tracers/dynamic_ot/v4alpha/dynamic_ot.proto b/generated_api_shadow/envoy/extensions/tracers/dynamic_ot/v4alpha/dynamic_ot.proto deleted file mode 100644 index 21455a974d3be..0000000000000 --- a/generated_api_shadow/envoy/extensions/tracers/dynamic_ot/v4alpha/dynamic_ot.proto +++ /dev/null @@ -1,33 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.tracers.dynamic_ot.v4alpha; - -import "google/protobuf/struct.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.tracers.dynamic_ot.v4alpha"; -option java_outer_classname = "DynamicOtProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Dynamically loadable OpenTracing tracer] - -// DynamicOtConfig is used to dynamically load a tracer from a shared library -// that implements the `OpenTracing dynamic loading API -// `_. -// [#extension: envoy.tracers.dynamic_ot] -message DynamicOtConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.trace.v3.DynamicOtConfig"; - - // Dynamic library implementing the `OpenTracing API - // `_. - string library = 1 [(validate.rules).string = {min_len: 1}]; - - // The configuration to use when creating a tracer from the given dynamic - // library. 
- google.protobuf.Struct config = 2; -} diff --git a/generated_api_shadow/envoy/extensions/tracers/lightstep/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/tracers/lightstep/v4alpha/BUILD deleted file mode 100644 index 8e63f3d426681..0000000000000 --- a/generated_api_shadow/envoy/extensions/tracers/lightstep/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/trace/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/tracers/lightstep/v4alpha/lightstep.proto b/generated_api_shadow/envoy/extensions/tracers/lightstep/v4alpha/lightstep.proto deleted file mode 100644 index c169d86e0ca07..0000000000000 --- a/generated_api_shadow/envoy/extensions/tracers/lightstep/v4alpha/lightstep.proto +++ /dev/null @@ -1,54 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.tracers.lightstep.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.tracers.lightstep.v4alpha"; -option java_outer_classname = "LightstepProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: LightStep tracer] - -// Configuration for the LightStep tracer. 
-// [#extension: envoy.tracers.lightstep] -message LightstepConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.trace.v3.LightstepConfig"; - - // Available propagation modes - enum PropagationMode { - // Propagate trace context in the single header x-ot-span-context. - ENVOY = 0; - - // Propagate trace context using LightStep's native format. - LIGHTSTEP = 1; - - // Propagate trace context using the b3 format. - B3 = 2; - - // Propagation trace context using the w3 trace-context standard. - TRACE_CONTEXT = 3; - } - - // The cluster manager cluster that hosts the LightStep collectors. - string collector_cluster = 1 [(validate.rules).string = {min_len: 1}]; - - // File containing the access token to the `LightStep - // `_ API. - string hidden_envoy_deprecated_access_token_file = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Access token to the `LightStep `_ API. - config.core.v4alpha.DataSource access_token = 4; - - // Propagation modes to use by LightStep's tracer. - repeated PropagationMode propagation_modes = 3 - [(validate.rules).repeated = {items {enum {defined_only: true}}}]; -} diff --git a/generated_api_shadow/envoy/extensions/tracers/opencensus/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/tracers/opencensus/v4alpha/BUILD deleted file mode 100644 index cedd6b14bf887..0000000000000 --- a/generated_api_shadow/envoy/extensions/tracers/opencensus/v4alpha/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/trace/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto", - ], -) diff --git a/generated_api_shadow/envoy/extensions/tracers/opencensus/v4alpha/opencensus.proto b/generated_api_shadow/envoy/extensions/tracers/opencensus/v4alpha/opencensus.proto deleted file mode 100644 index 792ff58454c9f..0000000000000 --- a/generated_api_shadow/envoy/extensions/tracers/opencensus/v4alpha/opencensus.proto +++ /dev/null @@ -1,102 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.tracers.opencensus.v4alpha; - -import "envoy/config/core/v4alpha/grpc_service.proto"; - -import "opencensus/proto/trace/v1/trace_config.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.tracers.opencensus.v4alpha"; -option java_outer_classname = "OpencensusProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: OpenCensus tracer] - -// Configuration for the OpenCensus tracer. -// [#next-free-field: 15] -// [#extension: envoy.tracers.opencensus] -message OpenCensusConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.trace.v3.OpenCensusConfig"; - - enum TraceContext { - // No-op default, no trace context is utilized. - NONE = 0; - - // W3C Trace-Context format "traceparent:" header. - TRACE_CONTEXT = 1; - - // Binary "grpc-trace-bin:" header. - GRPC_TRACE_BIN = 2; - - // "X-Cloud-Trace-Context:" header. - CLOUD_TRACE_CONTEXT = 3; - - // X-B3-* headers. - B3 = 4; - } - - reserved 7; - - // Configures tracing, e.g. 
the sampler, max number of annotations, etc. - .opencensus.proto.trace.v1.TraceConfig trace_config = 1; - - // Enables the stdout exporter if set to true. This is intended for debugging - // purposes. - bool stdout_exporter_enabled = 2; - - // Enables the Stackdriver exporter if set to true. The project_id must also - // be set. - bool stackdriver_exporter_enabled = 3; - - // The Cloud project_id to use for Stackdriver tracing. - string stackdriver_project_id = 4; - - // (optional) By default, the Stackdriver exporter will connect to production - // Stackdriver. If stackdriver_address is non-empty, it will instead connect - // to this address, which is in the gRPC format: - // https://github.com/grpc/grpc/blob/master/doc/naming.md - string stackdriver_address = 10; - - // (optional) The gRPC server that hosts Stackdriver tracing service. Only - // Google gRPC is supported. If :ref:`target_uri ` - // is not provided, the default production Stackdriver address will be used. - config.core.v4alpha.GrpcService stackdriver_grpc_service = 13; - - // Enables the Zipkin exporter if set to true. The url and service name must - // also be set. This is deprecated, prefer to use Envoy's :ref:`native Zipkin - // tracer `. - bool hidden_envoy_deprecated_zipkin_exporter_enabled = 5 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // The URL to Zipkin, e.g. "http://127.0.0.1:9411/api/v2/spans". This is - // deprecated, prefer to use Envoy's :ref:`native Zipkin tracer - // `. - string hidden_envoy_deprecated_zipkin_url = 6 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Enables the OpenCensus Agent exporter if set to true. The ocagent_address or - // ocagent_grpc_service must also be set. 
- bool ocagent_exporter_enabled = 11; - - // The address of the OpenCensus Agent, if its exporter is enabled, in gRPC - // format: https://github.com/grpc/grpc/blob/master/doc/naming.md - // [#comment:TODO: deprecate this field] - string ocagent_address = 12; - - // (optional) The gRPC server hosted by the OpenCensus Agent. Only Google gRPC is supported. - // This is only used if the ocagent_address is left empty. - config.core.v4alpha.GrpcService ocagent_grpc_service = 14; - - // List of incoming trace context headers we will accept. First one found - // wins. - repeated TraceContext incoming_trace_context = 8; - - // List of outgoing trace context headers we will produce. - repeated TraceContext outgoing_trace_context = 9; -} diff --git a/generated_api_shadow/envoy/extensions/tracers/skywalking/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/tracers/skywalking/v4alpha/BUILD deleted file mode 100644 index 1d56979cc4660..0000000000000 --- a/generated_api_shadow/envoy/extensions/tracers/skywalking/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/trace/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/tracers/skywalking/v4alpha/skywalking.proto b/generated_api_shadow/envoy/extensions/tracers/skywalking/v4alpha/skywalking.proto deleted file mode 100644 index 37936faa61337..0000000000000 --- a/generated_api_shadow/envoy/extensions/tracers/skywalking/v4alpha/skywalking.proto +++ /dev/null @@ -1,68 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.tracers.skywalking.v4alpha; - -import "envoy/config/core/v4alpha/grpc_service.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/sensitive.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.tracers.skywalking.v4alpha"; -option java_outer_classname = "SkywalkingProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: SkyWalking tracer] - -// Configuration for the SkyWalking tracer. Please note that if SkyWalking tracer is used as the -// provider of http tracer, then -// :ref:`start_child_span ` -// in the router must be set to true to get the correct topology and tracing data. Moreover, SkyWalking -// Tracer does not support SkyWalking extension header (``sw8-x``) temporarily. -// [#extension: envoy.tracers.skywalking] -message SkyWalkingConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.trace.v3.SkyWalkingConfig"; - - // SkyWalking collector service. 
- config.core.v4alpha.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; - - ClientConfig client_config = 2; -} - -// Client config for SkyWalking tracer. -message ClientConfig { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v3.ClientConfig"; - - // Service name for SkyWalking tracer. If this field is empty, then local service cluster name - // that configured by :ref:`Bootstrap node ` - // message's :ref:`cluster ` field or command line - // option :option:`--service-cluster` will be used. If both this field and local service cluster - // name are empty, ``EnvoyProxy`` is used as the service name by default. - string service_name = 1; - - // Service instance name for SkyWalking tracer. If this field is empty, then local service node - // that configured by :ref:`Bootstrap node ` - // message's :ref:`id ` field or command line option - // :option:`--service-node` will be used. If both this field and local service node are empty, - // ``EnvoyProxy`` is used as the instance name by default. - string instance_name = 2; - - // Authentication token config for SkyWalking. SkyWalking can use token authentication to secure - // that monitoring application data can be trusted. In current version, Token is considered as a - // simple string. - // [#comment:TODO(wbpcode): Get backend token through the SDS API.] - oneof backend_token_specifier { - // Inline authentication token string. - string backend_token = 3 [(udpa.annotations.sensitive) = true]; - } - - // Envoy caches the segment in memory when the SkyWalking backend service is temporarily unavailable. - // This field specifies the maximum number of segments that can be cached. If not specified, the - // default is 1024. 
- google.protobuf.UInt32Value max_cache_size = 4; -} diff --git a/generated_api_shadow/envoy/extensions/tracers/xray/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/tracers/xray/v4alpha/BUILD deleted file mode 100644 index 1d56979cc4660..0000000000000 --- a/generated_api_shadow/envoy/extensions/tracers/xray/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/trace/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/tracers/xray/v4alpha/xray.proto b/generated_api_shadow/envoy/extensions/tracers/xray/v4alpha/xray.proto deleted file mode 100644 index 649f294b4273b..0000000000000 --- a/generated_api_shadow/envoy/extensions/tracers/xray/v4alpha/xray.proto +++ /dev/null @@ -1,55 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.tracers.xray.v4alpha; - -import "envoy/config/core/v4alpha/address.proto"; -import "envoy/config/core/v4alpha/base.proto"; - -import "google/protobuf/struct.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.tracers.xray.v4alpha"; -option java_outer_classname = "XrayProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: AWS X-Ray Tracer Configuration] -// Configuration for AWS X-Ray tracer - -// [#extension: envoy.tracers.xray] -message XRayConfig { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v3.XRayConfig"; - - message SegmentFields { - option (udpa.annotations.versioning).previous_message_type = - 
"envoy.config.trace.v3.XRayConfig.SegmentFields"; - - // The type of AWS resource, e.g. "AWS::AppMesh::Proxy". - string origin = 1; - - // AWS resource metadata dictionary. - // See: `X-Ray Segment Document documentation `__ - google.protobuf.Struct aws = 2; - } - - // The UDP endpoint of the X-Ray Daemon where the spans will be sent. - // If this value is not set, the default value of 127.0.0.1:2000 will be used. - config.core.v4alpha.SocketAddress daemon_endpoint = 1; - - // The name of the X-Ray segment. - string segment_name = 2 [(validate.rules).string = {min_len: 1}]; - - // The location of a local custom sampling rules JSON file. - // For an example of the sampling rules see: - // `X-Ray SDK documentation - // `_ - config.core.v4alpha.DataSource sampling_rule_manifest = 3; - - // Optional custom fields to be added to each trace segment. - // see: `X-Ray Segment Document documentation - // `__ - SegmentFields segment_fields = 4; -} diff --git a/generated_api_shadow/envoy/extensions/tracers/zipkin/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/tracers/zipkin/v4alpha/BUILD deleted file mode 100644 index aefd915ae0546..0000000000000 --- a/generated_api_shadow/envoy/extensions/tracers/zipkin/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/trace/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/tracers/zipkin/v4alpha/zipkin.proto b/generated_api_shadow/envoy/extensions/tracers/zipkin/v4alpha/zipkin.proto deleted file mode 100644 index f7e11e43ab829..0000000000000 --- a/generated_api_shadow/envoy/extensions/tracers/zipkin/v4alpha/zipkin.proto +++ /dev/null @@ -1,73 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.tracers.zipkin.v4alpha; - -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.tracers.zipkin.v4alpha"; -option java_outer_classname = "ZipkinProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Zipkin tracer] - -// Configuration for the Zipkin tracer. -// [#extension: envoy.tracers.zipkin] -// [#next-free-field: 7] -message ZipkinConfig { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v3.ZipkinConfig"; - - // Available Zipkin collector endpoint versions. - enum CollectorEndpointVersion { - // Zipkin API v1, JSON over HTTP. - // [#comment: The default implementation of Zipkin client before this field is added was only v1 - // and the way user configure this was by not explicitly specifying the version. Consequently, - // before this is added, the corresponding Zipkin collector expected to receive v1 payload. - // Hence the motivation of adding HTTP_JSON_V1 as the default is to avoid a breaking change when - // user upgrading Envoy with this change. 
Furthermore, we also immediately deprecate this field, - // since in Zipkin realm this v1 version is considered to be not preferable anymore.] - hidden_envoy_deprecated_DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE = 0 [ - deprecated = true, - (envoy.annotations.disallowed_by_default_enum) = true, - (envoy.annotations.deprecated_at_minor_version_enum) = "3.0" - ]; - - // Zipkin API v2, JSON over HTTP. - HTTP_JSON = 1; - - // Zipkin API v2, protobuf over HTTP. - HTTP_PROTO = 2; - - // [#not-implemented-hide:] - GRPC = 3; - } - - // The cluster manager cluster that hosts the Zipkin collectors. - string collector_cluster = 1 [(validate.rules).string = {min_len: 1}]; - - // The API endpoint of the Zipkin service where the spans will be sent. When - // using a standard Zipkin installation, the API endpoint is typically - // /api/v1/spans, which is the default value. - string collector_endpoint = 2 [(validate.rules).string = {min_len: 1}]; - - // Determines whether a 128bit trace id will be used when creating a new - // trace instance. The default value is false, which will result in a 64 bit trace id being used. - bool trace_id_128bit = 3; - - // Determines whether client and server spans will share the same span context. - // The default value is true. - google.protobuf.BoolValue shared_span_context = 4; - - // Determines the selected collector endpoint version. By default, the ``HTTP_JSON_V1`` will be - // used. - CollectorEndpointVersion collector_endpoint_version = 5; - - // Optional hostname to use when sending spans to the collector_cluster. Useful for collectors - // that require a specific hostname. Defaults to :ref:`collector_cluster ` above. 
- string collector_hostname = 6; -} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/quic/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/transport_sockets/quic/v4alpha/BUILD deleted file mode 100644 index 976cefd189cca..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/quic/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/transport_sockets/quic/v3:pkg", - "//envoy/extensions/transport_sockets/tls/v4alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/quic/v4alpha/quic_transport.proto b/generated_api_shadow/envoy/extensions/transport_sockets/quic/v4alpha/quic_transport.proto deleted file mode 100644 index 9a5f096f56c7a..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/quic/v4alpha/quic_transport.proto +++ /dev/null @@ -1,35 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.quic.v4alpha; - -import "envoy/extensions/transport_sockets/tls/v4alpha/tls.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.quic.v4alpha"; -option java_outer_classname = "QuicTransportProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: quic transport] -// [#comment:#extension: envoy.transport_sockets.quic] - -// Configuration for Downstream QUIC transport socket. This provides Google's implementation of Google QUIC and IETF QUIC to Envoy. 
-message QuicDownstreamTransport { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.quic.v3.QuicDownstreamTransport"; - - tls.v4alpha.DownstreamTlsContext downstream_tls_context = 1 - [(validate.rules).message = {required: true}]; -} - -// Configuration for Upstream QUIC transport socket. This provides Google's implementation of Google QUIC and IETF QUIC to Envoy. -message QuicUpstreamTransport { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.quic.v3.QuicUpstreamTransport"; - - tls.v4alpha.UpstreamTlsContext upstream_tls_context = 1 - [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/starttls/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/transport_sockets/starttls/v4alpha/BUILD deleted file mode 100644 index b160d85ddb5b1..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/starttls/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/transport_sockets/raw_buffer/v3:pkg", - "//envoy/extensions/transport_sockets/starttls/v3:pkg", - "//envoy/extensions/transport_sockets/tls/v4alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/starttls/v4alpha/starttls.proto b/generated_api_shadow/envoy/extensions/transport_sockets/starttls/v4alpha/starttls.proto deleted file mode 100644 index d2a9dbeaf2ed4..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/starttls/v4alpha/starttls.proto +++ /dev/null @@ -1,58 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.starttls.v4alpha; - -import "envoy/extensions/transport_sockets/raw_buffer/v3/raw_buffer.proto"; -import "envoy/extensions/transport_sockets/tls/v4alpha/tls.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.starttls.v4alpha"; -option java_outer_classname = "StarttlsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: StartTls] -// [#extension: envoy.transport_sockets.starttls] - -// StartTls transport socket addresses situations when a protocol starts in clear-text and -// negotiates an in-band switch to TLS. StartTls transport socket is protocol agnostic. In the -// case of downstream StartTls a network filter is required which understands protocol exchange -// and a state machine to signal to the StartTls transport socket when a switch to TLS is -// required. 
Similarly, upstream StartTls requires the owner of an upstream transport socket to -// manage the state machine necessary to properly coordinate negotiation with the upstream and -// signal to the transport socket when a switch to secure transport is required. - -// Configuration for a downstream StartTls transport socket. -// StartTls transport socket wraps two sockets: -// * raw_buffer socket which is used at the beginning of the session -// * TLS socket used when a protocol negotiates a switch to encrypted traffic. -message StartTlsConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.starttls.v3.StartTlsConfig"; - - // (optional) Configuration for clear-text socket used at the beginning of the session. - raw_buffer.v3.RawBuffer cleartext_socket_config = 1; - - // Configuration for a downstream TLS socket. - transport_sockets.tls.v4alpha.DownstreamTlsContext tls_socket_config = 2 - [(validate.rules).message = {required: true}]; -} - -// Configuration for an upstream StartTls transport socket. -// StartTls transport socket wraps two sockets: -// * raw_buffer socket which is used at the beginning of the session -// * TLS socket used when a protocol negotiates a switch to encrypted traffic. -message UpstreamStartTlsConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.starttls.v3.UpstreamStartTlsConfig"; - - // (optional) Configuration for clear-text socket used at the beginning of the session. - raw_buffer.v3.RawBuffer cleartext_socket_config = 1; - - // Configuration for an upstream TLS socket. 
- transport_sockets.tls.v4alpha.UpstreamTlsContext tls_socket_config = 2 - [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tap/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/transport_sockets/tap/v4alpha/BUILD deleted file mode 100644 index fe393f574d0d9..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tap/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/extensions/common/tap/v4alpha:pkg", - "//envoy/extensions/transport_sockets/tap/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tap/v4alpha/tap.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tap/v4alpha/tap.proto deleted file mode 100644 index 5e0efc403ab5d..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tap/v4alpha/tap.proto +++ /dev/null @@ -1,33 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.tap.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/extensions/common/tap/v4alpha/common.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tap.v4alpha"; -option java_outer_classname = "TapProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Tap] -// [#extension: envoy.transport_sockets.tap] - -// Configuration for tap transport socket. 
This wraps another transport socket, providing the -// ability to interpose and record in plain text any traffic that is surfaced to Envoy. -message Tap { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tap.v3.Tap"; - - // Common configuration for the tap transport socket. - common.tap.v4alpha.CommonExtensionConfig common_config = 1 - [(validate.rules).message = {required: true}]; - - // The underlying transport socket being wrapped. - config.core.v4alpha.TransportSocket transport_socket = 2 - [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/common.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/common.proto index 64b3f59dcb263..1a86020683507 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/common.proto +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/common.proto @@ -11,6 +11,7 @@ import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/migrate.proto"; import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -236,7 +237,27 @@ message TlsSessionTicketKeys { [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true]; } -// [#next-free-field: 13] +// Indicates a certificate to be obtained from a named CertificateProvider plugin instance. +// The plugin instances are defined in the client's bootstrap file. +// The plugin allows certificates to be fetched/refreshed over the network asynchronously with +// respect to the TLS handshake. +// [#not-implemented-hide:] +message CertificateProviderPluginInstance { + // Provider instance name. If not present, defaults to "default". 
+ // + // Instance names should generally be defined not in terms of the underlying provider + // implementation (e.g., "file_watcher") but rather in terms of the function of the + // certificates (e.g., "foo_deployment_identity"). + string instance_name = 1; + + // Opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify + // a root-certificate (validation context) or "example.com" to specify a certificate for a + // particular domain. Not all provider instances will actually use this field, so the value + // defaults to the empty string. + string certificate_name = 2; +} + +// [#next-free-field: 14] message CertificateValidationContext { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.CertificateValidationContext"; @@ -281,7 +302,20 @@ message CertificateValidationContext { // directory for any file moves to support rotation. This currently only // applies to dynamic secrets, when the *CertificateValidationContext* is // delivered via SDS. - config.core.v3.DataSource trusted_ca = 1; + // + // Only one of *trusted_ca* and *ca_certificate_provider_instance* may be specified. + // + // [#next-major-version: This field and watched_directory below should ideally be moved into a + // separate sub-message, since there's no point in specifying the latter field without this one.] + config.core.v3.DataSource trusted_ca = 1 + [(udpa.annotations.field_migrate).oneof_promotion = "ca_cert_source"]; + + // Certificate provider instance for fetching TLS certificates. + // + // Only one of *trusted_ca* and *ca_certificate_provider_instance* may be specified. + // [#not-implemented-hide:] + CertificateProviderPluginInstance ca_certificate_provider_instance = 13 + [(udpa.annotations.field_migrate).oneof_promotion = "ca_cert_source"]; // If specified, updates of a file-based *trusted_ca* source will be triggered // by this watch. 
This allows explicit control over the path watched, by diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls.proto index 02287de5875fb..f680207955a8c 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls.proto +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls.proto @@ -9,7 +9,7 @@ import "envoy/extensions/transport_sockets/tls/v3/secret.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; -import "udpa/annotations/migrate.proto"; +import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -125,12 +125,18 @@ message DownstreamTlsContext { } // TLS context shared by both client and server TLS contexts. -// [#next-free-field: 14] +// [#next-free-field: 15] message CommonTlsContext { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.CommonTlsContext"; // Config for Certificate provider to get certificates. This provider should allow certificates to be // fetched/refreshed over the network asynchronously with respect to the TLS handshake. + // + // DEPRECATED: This message is not currently used, but if we ever do need it, we will want to + // move it out of CommonTlsContext and into common.proto, similar to the existing + // CertificateProviderPluginInstance message. + // + // [#not-implemented-hide:] message CertificateProvider { // opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify // a root-certificate (validation context) or "TLS" to specify a new tls-certificate. @@ -151,6 +157,11 @@ message CommonTlsContext { // Similar to CertificateProvider above, but allows the provider instances to be configured on // the client side instead of being sent from the control plane. 
+ // + // DEPRECATED: This message was moved outside of CommonTlsContext + // and now lives in common.proto. + // + // [#not-implemented-hide:] message CertificateProviderInstance { // Provider instance name. This name must be defined in the client's configuration (e.g., a // bootstrap file) to correspond to a provider instance (i.e., the same data in the typed_config @@ -179,26 +190,20 @@ message CommonTlsContext { // Config for fetching validation context via SDS API. Note SDS API allows certificates to be // fetched/refreshed over the network asynchronously with respect to the TLS handshake. - // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, - // or validation_context_certificate_provider_instance may be used. - SdsSecretConfig validation_context_sds_secret_config = 2 [ - (validate.rules).message = {required: true}, - (udpa.annotations.field_migrate).oneof_promotion = "dynamic_validation_context" - ]; + SdsSecretConfig validation_context_sds_secret_config = 2 + [(validate.rules).message = {required: true}]; - // Certificate provider for fetching validation context. - // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, - // or validation_context_certificate_provider_instance may be used. + // Certificate provider for fetching CA certs. This will populate the + // *default_validation_context.trusted_ca* field. // [#not-implemented-hide:] CertificateProvider validation_context_certificate_provider = 3 - [(udpa.annotations.field_migrate).oneof_promotion = "dynamic_validation_context"]; + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - // Certificate provider instance for fetching validation context. - // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, - // or validation_context_certificate_provider_instance may be used. + // Certificate provider instance for fetching CA certs. 
This will populate the + // *default_validation_context.trusted_ca* field. // [#not-implemented-hide:] CertificateProviderInstance validation_context_certificate_provider_instance = 4 - [(udpa.annotations.field_migrate).oneof_promotion = "dynamic_validation_context"]; + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; } reserved 5; @@ -212,6 +217,12 @@ message CommonTlsContext { // Only a single TLS certificate is supported in client contexts. In server contexts, the first // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is // used for clients that support ECDSA. + // + // Only one of *tls_certificates*, *tls_certificate_sds_secret_configs*, + // and *tls_certificate_provider_instance* may be used. + // [#next-major-version: These mutually exclusive fields should ideally be in a oneof, but it's + // not legal to put a repeated field in a oneof. In the next major version, we should rework + // this to avoid this problem.] repeated TlsCertificate tls_certificates = 2; // Configs for fetching TLS certificates via SDS API. Note SDS API allows certificates to be @@ -220,18 +231,30 @@ message CommonTlsContext { // The same number and types of certificates as :ref:`tls_certificates ` // are valid in the the certificates fetched through this setting. // - // If :ref:`tls_certificates ` - // is non-empty, this field is ignored. + // Only one of *tls_certificates*, *tls_certificate_sds_secret_configs*, + // and *tls_certificate_provider_instance* may be used. + // [#next-major-version: These mutually exclusive fields should ideally be in a oneof, but it's + // not legal to put a repeated field in a oneof. In the next major version, we should rework + // this to avoid this problem.] repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 [(validate.rules).repeated = {max_items: 2}]; + // Certificate provider instance for fetching TLS certs. 
+ // + // Only one of *tls_certificates*, *tls_certificate_sds_secret_configs*, + // and *tls_certificate_provider_instance* may be used. + // [#not-implemented-hide:] + CertificateProviderPluginInstance tls_certificate_provider_instance = 14; + // Certificate provider for fetching TLS certificates. // [#not-implemented-hide:] - CertificateProvider tls_certificate_certificate_provider = 9; + CertificateProvider tls_certificate_certificate_provider = 9 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; // Certificate provider instance for fetching TLS certificates. // [#not-implemented-hide:] - CertificateProviderInstance tls_certificate_certificate_provider_instance = 11; + CertificateProviderInstance tls_certificate_certificate_provider_instance = 11 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; oneof validation_context_type { // How to validate peer certificates. @@ -252,11 +275,13 @@ message CommonTlsContext { // Certificate provider for fetching validation context. // [#not-implemented-hide:] - CertificateProvider validation_context_certificate_provider = 10; + CertificateProvider validation_context_certificate_provider = 10 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; // Certificate provider instance for fetching validation context. // [#not-implemented-hide:] - CertificateProviderInstance validation_context_certificate_provider_instance = 12; + CertificateProviderInstance validation_context_certificate_provider_instance = 12 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; } // Supplies the list of ALPN protocols that the listener should expose. 
In diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/BUILD deleted file mode 100644 index 0cf3219ca2cdc..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/extensions/transport_sockets/tls/v3:pkg", - "//envoy/type/matcher/v4alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/common.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/common.proto deleted file mode 100644 index e696fffc5e57d..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/common.proto +++ /dev/null @@ -1,404 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.tls.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/extension.proto"; -import "envoy/type/matcher/v4alpha/string.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/sensitive.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha"; -option java_outer_classname = "CommonProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Common TLS configuration] - -message TlsParameters { - option (udpa.annotations.versioning).previous_message_type = - 
"envoy.extensions.transport_sockets.tls.v3.TlsParameters"; - - enum TlsProtocol { - // Envoy will choose the optimal TLS version. - TLS_AUTO = 0; - - // TLS 1.0 - TLSv1_0 = 1; - - // TLS 1.1 - TLSv1_1 = 2; - - // TLS 1.2 - TLSv1_2 = 3; - - // TLS 1.3 - TLSv1_3 = 4; - } - - // Minimum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for - // servers. - TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; - - // Maximum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_3`` for - // servers. - TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; - - // If specified, the TLS listener will only support the specified `cipher list - // `_ - // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). - // - // If not specified, a default list will be used. Defaults are different for server (downstream) and - // client (upstream) TLS configurations. - // - // In non-FIPS builds, the default server cipher list is: - // - // .. code-block:: none - // - // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] - // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] - // ECDHE-ECDSA-AES128-SHA - // ECDHE-RSA-AES128-SHA - // AES128-GCM-SHA256 - // AES128-SHA - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - // ECDHE-ECDSA-AES256-SHA - // ECDHE-RSA-AES256-SHA - // AES256-GCM-SHA384 - // AES256-SHA - // - // In builds using :ref:`BoringSSL FIPS `, the default server cipher list is: - // - // .. 
code-block:: none - // - // ECDHE-ECDSA-AES128-GCM-SHA256 - // ECDHE-RSA-AES128-GCM-SHA256 - // ECDHE-ECDSA-AES128-SHA - // ECDHE-RSA-AES128-SHA - // AES128-GCM-SHA256 - // AES128-SHA - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - // ECDHE-ECDSA-AES256-SHA - // ECDHE-RSA-AES256-SHA - // AES256-GCM-SHA384 - // AES256-SHA - // - // In non-FIPS builds, the default client cipher list is: - // - // .. code-block:: none - // - // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] - // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - // - // In builds using :ref:`BoringSSL FIPS `, the default client cipher list is: - // - // .. code-block:: none - // - // ECDHE-ECDSA-AES128-GCM-SHA256 - // ECDHE-RSA-AES128-GCM-SHA256 - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - repeated string cipher_suites = 3; - - // If specified, the TLS connection will only support the specified ECDH - // curves. If not specified, the default curves will be used. - // - // In non-FIPS builds, the default curves are: - // - // .. code-block:: none - // - // X25519 - // P-256 - // - // In builds using :ref:`BoringSSL FIPS `, the default curve is: - // - // .. code-block:: none - // - // P-256 - repeated string ecdh_curves = 4; -} - -// BoringSSL private key method configuration. The private key methods are used for external -// (potentially asynchronous) signing and decryption operations. Some use cases for private key -// methods would be TPM support and TLS acceleration. -message PrivateKeyProvider { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.PrivateKeyProvider"; - - reserved 2; - - reserved "config"; - - // Private key method provider name. The name must match a - // supported private key method provider type. 
- string provider_name = 1 [(validate.rules).string = {min_len: 1}]; - - // Private key method provider specific configuration. - oneof config_type { - google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true]; - } -} - -// [#next-free-field: 8] -message TlsCertificate { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.TlsCertificate"; - - // The TLS certificate chain. - // - // If *certificate_chain* is a filesystem path, a watch will be added to the - // parent directory for any file moves to support rotation. This currently - // only applies to dynamic secrets, when the *TlsCertificate* is delivered via - // SDS. - config.core.v4alpha.DataSource certificate_chain = 1; - - // The TLS private key. - // - // If *private_key* is a filesystem path, a watch will be added to the parent - // directory for any file moves to support rotation. This currently only - // applies to dynamic secrets, when the *TlsCertificate* is delivered via SDS. - config.core.v4alpha.DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; - - // If specified, updates of file-based *certificate_chain* and *private_key* - // sources will be triggered by this watch. The certificate/key pair will be - // read together and validated for atomic read consistency (i.e. no - // intervening modification occurred between cert/key read, verified by file - // hash comparisons). This allows explicit control over the path watched, by - // default the parent directories of the filesystem paths in - // *certificate_chain* and *private_key* are watched if this field is not - // specified. This only applies when a *TlsCertificate* is delivered by SDS - // with references to filesystem paths. See the :ref:`SDS key rotation - // ` documentation for further details. - config.core.v4alpha.WatchedDirectory watched_directory = 7; - - // BoringSSL private key method provider. This is an alternative to :ref:`private_key - // ` field. 
This can't be - // marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key - // ` and - // :ref:`private_key_provider - // ` fields will result in an - // error. - PrivateKeyProvider private_key_provider = 6; - - // The password to decrypt the TLS private key. If this field is not set, it is assumed that the - // TLS private key is not password encrypted. - config.core.v4alpha.DataSource password = 3 [(udpa.annotations.sensitive) = true]; - - // The OCSP response to be stapled with this certificate during the handshake. - // The response must be DER-encoded and may only be provided via ``filename`` or - // ``inline_bytes``. The response may pertain to only one certificate. - config.core.v4alpha.DataSource ocsp_staple = 4; - - // [#not-implemented-hide:] - repeated config.core.v4alpha.DataSource signed_certificate_timestamp = 5; -} - -message TlsSessionTicketKeys { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.TlsSessionTicketKeys"; - - // Keys for encrypting and decrypting TLS session tickets. The - // first key in the array contains the key to encrypt all new sessions created by this context. - // All keys are candidates for decrypting received tickets. This allows for easy rotation of keys - // by, for example, putting the new key first, and the previous key second. - // - // If :ref:`session_ticket_keys ` - // is not specified, the TLS library will still support resuming sessions via tickets, but it will - // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts - // or on different hosts. - // - // Each key must contain exactly 80 bytes of cryptographically-secure random data. For - // example, the output of ``openssl rand 80``. - // - // .. attention:: - // - // Using this feature has serious security considerations and risks. 
Improper handling of keys - // may result in loss of secrecy in connections, even if ciphers supporting perfect forward - // secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some - // discussion. To minimize the risk, you must: - // - // * Keep the session ticket keys at least as secure as your TLS certificate private keys - // * Rotate session ticket keys at least daily, and preferably hourly - // * Always generate keys using a cryptographically-secure random data source - repeated config.core.v4alpha.DataSource keys = 1 - [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true]; -} - -// [#next-free-field: 13] -message CertificateValidationContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext"; - - // Peer certificate verification mode. - enum TrustChainVerification { - // Perform default certificate verification (e.g., against CA / verification lists) - VERIFY_TRUST_CHAIN = 0; - - // Connections where the certificate fails verification will be permitted. - // For HTTP connections, the result of certificate verification can be used in route matching. ( - // see :ref:`validated ` ). - ACCEPT_UNTRUSTED = 1; - } - - reserved 4, 5; - - reserved "verify_subject_alt_name"; - - // TLS certificate data containing certificate authority certificates to use in verifying - // a presented peer certificate (e.g. server certificate for clusters or client certificate - // for listeners). If not specified and a peer certificate is presented it will not be - // verified. By default, a client certificate is optional, unless one of the additional - // options (:ref:`require_client_certificate - // `, - // :ref:`verify_certificate_spki - // `, - // :ref:`verify_certificate_hash - // `, or - // :ref:`match_subject_alt_names - // `) is also - // specified. 
- // - // It can optionally contain certificate revocation lists, in which case Envoy will verify - // that the presented peer certificate has not been revoked by one of the included CRLs. Note - // that if a CRL is provided for any certificate authority in a trust chain, a CRL must be - // provided for all certificate authorities in that chain. Failure to do so will result in - // verification failure for both revoked and unrevoked certificates from that chain. - // - // See :ref:`the TLS overview ` for a list of common - // system CA locations. - // - // If *trusted_ca* is a filesystem path, a watch will be added to the parent - // directory for any file moves to support rotation. This currently only - // applies to dynamic secrets, when the *CertificateValidationContext* is - // delivered via SDS. - config.core.v4alpha.DataSource trusted_ca = 1; - - // If specified, updates of a file-based *trusted_ca* source will be triggered - // by this watch. This allows explicit control over the path watched, by - // default the parent directory of the filesystem path in *trusted_ca* is - // watched if this field is not specified. This only applies when a - // *CertificateValidationContext* is delivered by SDS with references to - // filesystem paths. See the :ref:`SDS key rotation ` - // documentation for further details. - config.core.v4alpha.WatchedDirectory watched_directory = 11; - - // An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the - // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate - // matches one of the specified values. - // - // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate - // can be generated with the following command: - // - // .. 
code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -noout -pubkey - // | openssl pkey -pubin -outform DER - // | openssl dgst -sha256 -binary - // | openssl enc -base64 - // NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= - // - // This is the format used in HTTP Public Key Pinning. - // - // When both: - // :ref:`verify_certificate_hash - // ` and - // :ref:`verify_certificate_spki - // ` are specified, - // a hash matching value from either of the lists will result in the certificate being accepted. - // - // .. attention:: - // - // This option is preferred over :ref:`verify_certificate_hash - // `, - // because SPKI is tied to a private key, so it doesn't change when the certificate - // is renewed using the same private key. - repeated string verify_certificate_spki = 3 - [(validate.rules).repeated = {items {string {min_len: 44 max_bytes: 44}}}]; - - // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that - // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. - // - // A hex-encoded SHA-256 of the certificate can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " -f2 - // df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a - // - // A long hex-encoded and colon-separated SHA-256 (a.k.a. "fingerprint") of the certificate - // can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d"=" -f2 - // DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A - // - // Both of those formats are acceptable. 
- // - // When both: - // :ref:`verify_certificate_hash - // ` and - // :ref:`verify_certificate_spki - // ` are specified, - // a hash matching value from either of the lists will result in the certificate being accepted. - repeated string verify_certificate_hash = 2 - [(validate.rules).repeated = {items {string {min_len: 64 max_bytes: 95}}}]; - - // An optional list of Subject Alternative name matchers. If specified, Envoy will verify that the - // Subject Alternative Name of the presented certificate matches one of the specified matchers. - // - // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be - // configured with exact match type in the :ref:`string matcher `. - // For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", - // it should be configured as shown below. - // - // .. code-block:: yaml - // - // match_subject_alt_names: - // exact: "api.example.com" - // - // .. attention:: - // - // Subject Alternative Names are easily spoofable and verifying only them is insecure, - // therefore this option must be used together with :ref:`trusted_ca - // `. - repeated type.matcher.v4alpha.StringMatcher match_subject_alt_names = 9; - - // [#not-implemented-hide:] Must present signed certificate time-stamp. - google.protobuf.BoolValue require_signed_certificate_timestamp = 6; - - // An optional `certificate revocation list - // `_ - // (in PEM format). If specified, Envoy will verify that the presented peer - // certificate has not been revoked by this CRL. If this DataSource contains - // multiple CRLs, all of them will be used. Note that if a CRL is provided - // for any certificate authority in a trust chain, a CRL must be provided - // for all certificate authorities in that chain. Failure to do so will - // result in verification failure for both revoked and unrevoked certificates - // from that chain. 
- config.core.v4alpha.DataSource crl = 7; - - // If specified, Envoy will not reject expired certificates. - bool allow_expired_certificate = 8; - - // Certificate trust chain verification mode. - TrustChainVerification trust_chain_verification = 10 - [(validate.rules).enum = {defined_only: true}]; - - // The configuration of an extension specific certificate validator. - // If specified, all validation is done by the specified validator, - // and the behavior of all other validation settings is defined by the specified validator (and may be entirely ignored, unused, and unvalidated). - // Refer to the documentation for the specified validator. If you do not want a custom validation algorithm, do not set this field. - // [#extension-category: envoy.tls.cert_validator] - config.core.v4alpha.TypedExtensionConfig custom_validator_config = 12; -} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto deleted file mode 100644 index 5bb8c86b94385..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto +++ /dev/null @@ -1,58 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.tls.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/config_source.proto"; -import "envoy/extensions/transport_sockets/tls/v4alpha/common.proto"; - -import "udpa/annotations/sensitive.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha"; -option java_outer_classname = "SecretProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Secrets configuration] - -message GenericSecret { - option 
(udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.GenericSecret"; - - // Secret of generic type and is available to filters. - config.core.v4alpha.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; -} - -message SdsSecretConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig"; - - // Name by which the secret can be uniquely referred to. When both name and config are specified, - // then secret can be fetched and/or reloaded via SDS. When only name is specified, then secret - // will be loaded from static resources. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - config.core.v4alpha.ConfigSource sds_config = 2; -} - -// [#next-free-field: 6] -message Secret { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.Secret"; - - // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. 
- string name = 1; - - oneof type { - TlsCertificate tls_certificate = 2; - - TlsSessionTicketKeys session_ticket_keys = 3; - - CertificateValidationContext validation_context = 4; - - GenericSecret generic_secret = 5; - } -} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto deleted file mode 100644 index b92cae619dd9c..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto +++ /dev/null @@ -1,282 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.tls.v4alpha; - -import "envoy/config/core/v4alpha/extension.proto"; -import "envoy/extensions/transport_sockets/tls/v4alpha/common.proto"; -import "envoy/extensions/transport_sockets/tls/v4alpha/secret.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha"; -option java_outer_classname = "TlsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: TLS transport socket] -// [#extension: envoy.transport_sockets.tls] -// The TLS contexts below provide the transport socket configuration for upstream/downstream TLS. - -message UpstreamTlsContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext"; - - // Common TLS context settings. - // - // .. attention:: - // - // Server certificate verification is not enabled by default. Configure - // :ref:`trusted_ca` to enable - // verification. - CommonTlsContext common_tls_context = 1; - - // SNI string to use when creating TLS backend connections. 
- string sni = 2 [(validate.rules).string = {max_bytes: 255}]; - - // If true, server-initiated TLS renegotiation will be allowed. - // - // .. attention:: - // - // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. - bool allow_renegotiation = 3; - - // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets - // for TLSv1.2 and older) to store for the purpose of session resumption. - // - // Defaults to 1, setting this to 0 disables session resumption. - google.protobuf.UInt32Value max_session_keys = 4; -} - -// [#next-free-field: 9] -message DownstreamTlsContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext"; - - enum OcspStaplePolicy { - // OCSP responses are optional. If an OCSP response is absent - // or expired, the associated certificate will be used for - // connections without an OCSP staple. - LENIENT_STAPLING = 0; - - // OCSP responses are optional. If an OCSP response is absent, - // the associated certificate will be used without an - // OCSP staple. If a response is provided but is expired, - // the associated certificate will not be used for - // subsequent connections. If no suitable certificate is found, - // the connection is rejected. - STRICT_STAPLING = 1; - - // OCSP responses are required. Configuration will fail if - // a certificate is provided without an OCSP response. If a - // response expires, the associated certificate will not be - // used connections. If no suitable certificate is found, the - // connection is rejected. - MUST_STAPLE = 2; - } - - // Common TLS context settings. - CommonTlsContext common_tls_context = 1; - - // If specified, Envoy will reject connections without a valid client - // certificate. - google.protobuf.BoolValue require_client_certificate = 2; - - // If specified, Envoy will reject connections without a valid and matching SNI. 
- // [#not-implemented-hide:] - google.protobuf.BoolValue require_sni = 3; - - oneof session_ticket_keys_type { - // TLS session ticket key settings. - TlsSessionTicketKeys session_ticket_keys = 4; - - // Config for fetching TLS session ticket keys via SDS API. - SdsSecretConfig session_ticket_keys_sds_secret_config = 5; - - // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS - // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. - // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using - // the keys specified through either :ref:`session_ticket_keys ` - // or :ref:`session_ticket_keys_sds_secret_config `. - // If this config is set to false and no keys are explicitly configured, the TLS server will issue - // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the - // implication that sessions cannot be resumed across hot restarts or on different hosts. - bool disable_stateless_session_resumption = 7; - } - - // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session - // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) - // ` - // only seconds could be specified (fractional seconds are going to be ignored). - google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { - lt {seconds: 4294967296} - gte {} - }]; - - // Config for whether to use certificates if they do not have - // an accompanying OCSP response or if the response expires at runtime. - // Defaults to LENIENT_STAPLING - OcspStaplePolicy ocsp_staple_policy = 8 [(validate.rules).enum = {defined_only: true}]; -} - -// TLS context shared by both client and server TLS contexts. 
-// [#next-free-field: 14] -message CommonTlsContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext"; - - // Config for Certificate provider to get certificates. This provider should allow certificates to be - // fetched/refreshed over the network asynchronously with respect to the TLS handshake. - message CertificateProvider { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider"; - - // opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify - // a root-certificate (validation context) or "TLS" to specify a new tls-certificate. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Provider specific config. - // Note: an implementation is expected to dedup multiple instances of the same config - // to maintain a single certificate-provider instance. The sharing can happen, for - // example, among multiple clusters or between the tls_certificate and validation_context - // certificate providers of a cluster. - // This config could be supplied inline or (in future) a named xDS resource. - oneof config { - option (validate.required) = true; - - config.core.v4alpha.TypedExtensionConfig typed_config = 2; - } - } - - // Similar to CertificateProvider above, but allows the provider instances to be configured on - // the client side instead of being sent from the control plane. - message CertificateProviderInstance { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProviderInstance"; - - // Provider instance name. 
This name must be defined in the client's configuration (e.g., a - // bootstrap file) to correspond to a provider instance (i.e., the same data in the typed_config - // field that would be sent in the CertificateProvider message if the config was sent by the - // control plane). If not present, defaults to "default". - // - // Instance names should generally be defined not in terms of the underlying provider - // implementation (e.g., "file_watcher") but rather in terms of the function of the - // certificates (e.g., "foo_deployment_identity"). - string instance_name = 1; - - // Opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify - // a root-certificate (validation context) or "example.com" to specify a certificate for a - // particular domain. Not all provider instances will actually use this field, so the value - // defaults to the empty string. - string certificate_name = 2; - } - - message CombinedCertificateValidationContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext." - "CombinedCertificateValidationContext"; - - // How to validate peer certificates. - CertificateValidationContext default_validation_context = 1 - [(validate.rules).message = {required: true}]; - - oneof dynamic_validation_context { - // Config for fetching validation context via SDS API. Note SDS API allows certificates to be - // fetched/refreshed over the network asynchronously with respect to the TLS handshake. - // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, - // or validation_context_certificate_provider_instance may be used. - SdsSecretConfig validation_context_sds_secret_config = 2 - [(validate.rules).message = {required: true}]; - - // Certificate provider for fetching validation context. 
- // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, - // or validation_context_certificate_provider_instance may be used. - // [#not-implemented-hide:] - CertificateProvider validation_context_certificate_provider = 3; - - // Certificate provider instance for fetching validation context. - // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, - // or validation_context_certificate_provider_instance may be used. - // [#not-implemented-hide:] - CertificateProviderInstance validation_context_certificate_provider_instance = 4; - } - } - - reserved 5; - - // TLS protocol versions, cipher suites etc. - TlsParameters tls_params = 1; - - // :ref:`Multiple TLS certificates ` can be associated with the - // same context to allow both RSA and ECDSA certificates. - // - // Only a single TLS certificate is supported in client contexts. In server contexts, the first - // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is - // used for clients that support ECDSA. - repeated TlsCertificate tls_certificates = 2; - - // Configs for fetching TLS certificates via SDS API. Note SDS API allows certificates to be - // fetched/refreshed over the network asynchronously with respect to the TLS handshake. - // - // The same number and types of certificates as :ref:`tls_certificates ` - // are valid in the the certificates fetched through this setting. - // - // If :ref:`tls_certificates ` - // is non-empty, this field is ignored. - repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 - [(validate.rules).repeated = {max_items: 2}]; - - // Certificate provider for fetching TLS certificates. - // [#not-implemented-hide:] - CertificateProvider tls_certificate_certificate_provider = 9; - - // Certificate provider instance for fetching TLS certificates. 
- // [#not-implemented-hide:] - CertificateProviderInstance tls_certificate_certificate_provider_instance = 11; - - oneof validation_context_type { - // How to validate peer certificates. - CertificateValidationContext validation_context = 3; - - // Config for fetching validation context via SDS API. Note SDS API allows certificates to be - // fetched/refreshed over the network asynchronously with respect to the TLS handshake. - SdsSecretConfig validation_context_sds_secret_config = 7; - - // Combined certificate validation context holds a default CertificateValidationContext - // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic - // and default CertificateValidationContext are merged into a new CertificateValidationContext - // for validation. This merge is done by Message::MergeFrom(), so dynamic - // CertificateValidationContext overwrites singular fields in default - // CertificateValidationContext, and concatenates repeated fields to default - // CertificateValidationContext, and logical OR is applied to boolean fields. - CombinedCertificateValidationContext combined_validation_context = 8; - - // Certificate provider for fetching validation context. - // [#not-implemented-hide:] - CertificateProvider validation_context_certificate_provider = 10; - - // Certificate provider instance for fetching validation context. - // [#not-implemented-hide:] - CertificateProviderInstance validation_context_certificate_provider_instance = 12; - } - - // Supplies the list of ALPN protocols that the listener should expose. In - // practice this is likely to be set to one of two values (see the - // :ref:`codec_type - // ` - // parameter in the HTTP connection manager for more information): - // - // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. - // * "http/1.1" If the listener is only going to support HTTP/1.1. - // - // There is no default for this parameter. If empty, Envoy will not expose ALPN. 
- repeated string alpn_protocols = 4; - - // Custom TLS handshaker. If empty, defaults to native TLS handshaking - // behavior. - config.core.v4alpha.TypedExtensionConfig custom_handshaker = 13; -} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/tls_spiffe_validator_config.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/tls_spiffe_validator_config.proto deleted file mode 100644 index 8191318930be6..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/tls_spiffe_validator_config.proto +++ /dev/null @@ -1,66 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.tls.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha"; -option java_outer_classname = "TlsSpiffeValidatorConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: SPIFFE Certificate Validator] -// [#extension: envoy.tls.cert_validator.spiffe] - -// Configuration specific to the `SPIFFE `_ certificate validator. -// -// Example: -// -// .. validated-code-block:: yaml -// :type-name: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext -// -// custom_validator_config: -// name: envoy.tls.cert_validator.spiffe -// typed_config: -// "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.SPIFFECertValidatorConfig -// trust_domains: -// - name: foo.com -// trust_bundle: -// filename: "foo.pem" -// - name: envoy.com -// trust_bundle: -// filename: "envoy.pem" -// -// In this example, a presented peer certificate whose SAN matches `spiffe//foo.com/**` is validated against -// the "foo.pem" x.509 certificate. 
All the trust bundles are isolated from each other, so no trust domain can mint -// a SVID belonging to another trust domain. That means, in this example, a SVID signed by `envoy.com`'s CA with `spiffe//foo.com/**` -// SAN would be rejected since Envoy selects the trust bundle according to the presented SAN before validate the certificate. -// -// Note that SPIFFE validator inherits and uses the following options from :ref:`CertificateValidationContext `. -// -// - :ref:`allow_expired_certificate ` to allow expired certificates. -// - :ref:`match_subject_alt_names ` to match **URI** SAN of certificates. Unlike the default validator, SPIFFE validator only matches **URI** SAN (which equals to SVID in SPIFFE terminology) and ignore other SAN types. -// -message SPIFFECertValidatorConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.SPIFFECertValidatorConfig"; - - message TrustDomain { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.transport_sockets.tls.v3.SPIFFECertValidatorConfig.TrustDomain"; - - // Name of the trust domain, `example.com`, `foo.bar.gov` for example. - // Note that this must *not* have "spiffe://" prefix. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Specify a data source holding x.509 trust bundle used for validating incoming SVID(s) in this trust domain. - config.core.v4alpha.DataSource trust_bundle = 2; - } - - // This field specifies trust domains used for validating incoming X.509-SVID(s). 
- repeated TrustDomain trust_domains = 1 [(validate.rules).repeated = {min_items: 1}]; -} diff --git a/generated_api_shadow/envoy/extensions/upstreams/http/v3/http_protocol_options.proto b/generated_api_shadow/envoy/extensions/upstreams/http/v3/http_protocol_options.proto index f99f4059166b5..271dcfbe49cec 100644 --- a/generated_api_shadow/envoy/extensions/upstreams/http/v3/http_protocol_options.proto +++ b/generated_api_shadow/envoy/extensions/upstreams/http/v3/http_protocol_options.proto @@ -77,6 +77,9 @@ message HttpProtocolOptions { // If this is used, the cluster can use either of the configured protocols, and // will use whichever protocol was used by the downstream connection. + // + // If HTTP/3 is configured for downstream and not configured for upstream, + // HTTP/3 requests will fail over to HTTP/2. message UseDownstreamHttpConfig { config.core.v3.Http1ProtocolOptions http_protocol_options = 1; diff --git a/generated_api_shadow/envoy/extensions/upstreams/http/v4alpha/BUILD b/generated_api_shadow/envoy/extensions/upstreams/http/v4alpha/BUILD deleted file mode 100644 index 3b00c0d6e6f2f..0000000000000 --- a/generated_api_shadow/envoy/extensions/upstreams/http/v4alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/extensions/upstreams/http/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/upstreams/http/v4alpha/http_protocol_options.proto b/generated_api_shadow/envoy/extensions/upstreams/http/v4alpha/http_protocol_options.proto deleted file mode 100644 index 10971c2587f04..0000000000000 --- a/generated_api_shadow/envoy/extensions/upstreams/http/v4alpha/http_protocol_options.proto +++ /dev/null @@ -1,161 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.upstreams.http.v4alpha; - -import "envoy/config/core/v4alpha/protocol.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.v4alpha"; -option java_outer_classname = "HttpProtocolOptionsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: HTTP Protocol Options] -// [#extension: envoy.upstreams.http.http_protocol_options] - -// HttpProtocolOptions specifies Http upstream protocol options. This object -// is used in -// :ref:`typed_extension_protocol_options`, -// keyed by the name `envoy.extensions.upstreams.http.v3.HttpProtocolOptions`. -// -// This controls what protocol(s) should be used for upstream and how said protocol(s) are configured. -// -// This replaces the prior pattern of explicit protocol configuration directly -// in the cluster. So a configuration like this, explicitly configuring the use of HTTP/2 upstream: -// -// .. 
code:: -// -// clusters: -// - name: some_service -// connect_timeout: 5s -// upstream_http_protocol_options: -// auto_sni: true -// common_http_protocol_options: -// idle_timeout: 1s -// http2_protocol_options: -// max_concurrent_streams: 100 -// .... [further cluster config] -// -// Would now look like this: -// -// .. code:: -// -// clusters: -// - name: some_service -// connect_timeout: 5s -// typed_extension_protocol_options: -// envoy.extensions.upstreams.http.v3.HttpProtocolOptions: -// "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions -// upstream_http_protocol_options: -// auto_sni: true -// common_http_protocol_options: -// idle_timeout: 1s -// explicit_http_config: -// http2_protocol_options: -// max_concurrent_streams: 100 -// .... [further cluster config] -// [#next-free-field: 6] -message HttpProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.upstreams.http.v3.HttpProtocolOptions"; - - // If this is used, the cluster will only operate on one of the possible upstream protocols. - // Note that HTTP/2 or above should generally be used for upstream gRPC clusters. - message ExplicitHttpConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.upstreams.http.v3.HttpProtocolOptions.ExplicitHttpConfig"; - - oneof protocol_config { - option (validate.required) = true; - - config.core.v4alpha.Http1ProtocolOptions http_protocol_options = 1; - - config.core.v4alpha.Http2ProtocolOptions http2_protocol_options = 2; - - // .. warning:: - // QUIC support is currently alpha and should be used with caution. Please - // see :ref:`here ` for details. - config.core.v4alpha.Http3ProtocolOptions http3_protocol_options = 3; - } - } - - // If this is used, the cluster can use either of the configured protocols, and - // will use whichever protocol was used by the downstream connection. 
- message UseDownstreamHttpConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.upstreams.http.v3.HttpProtocolOptions.UseDownstreamHttpConfig"; - - config.core.v4alpha.Http1ProtocolOptions http_protocol_options = 1; - - config.core.v4alpha.Http2ProtocolOptions http2_protocol_options = 2; - - // .. warning:: - // QUIC support is currently alpha and should be used with caution. Please - // see :ref:`here ` for details. - config.core.v4alpha.Http3ProtocolOptions http3_protocol_options = 3; - } - - // If this is used, the cluster can use either HTTP/1 or HTTP/2, and will use whichever - // protocol is negotiated by ALPN with the upstream. - // Clusters configured with *AutoHttpConfig* will use the highest available - // protocol; HTTP/2 if supported, otherwise HTTP/1. - // If the upstream does not support ALPN, *AutoHttpConfig* will fail over to HTTP/1. - // This can only be used with transport sockets which support ALPN. Using a - // transport socket which does not support ALPN will result in configuration - // failure. The transport layer may be configured with custom ALPN, but the default ALPN - // for the cluster (or if custom ALPN fails) will be "h2,http/1.1". - message AutoHttpConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.extensions.upstreams.http.v3.HttpProtocolOptions.AutoHttpConfig"; - - config.core.v4alpha.Http1ProtocolOptions http_protocol_options = 1; - - config.core.v4alpha.Http2ProtocolOptions http2_protocol_options = 2; - - // Unlike HTTP/1 and HTTP/2, HTTP/3 will not be configured unless it is - // present, and (soon) only if there is an indication of server side - // support. - // See :ref:`here ` for more information on - // when HTTP/3 will be used, and when Envoy will fail over to TCP. - // - // .. warning:: - // QUIC support is currently alpha and should be used with caution. Please - // see :ref:`here ` for details. 
- // AutoHttpConfig config is undergoing especially rapid change and as it - // is alpha is not guaranteed to be API-stable. - config.core.v4alpha.Http3ProtocolOptions http3_protocol_options = 3; - - // [#not-implemented-hide:] - // The presence of alternate protocols cache options causes the use of the - // alternate protocols cache, which is responsible for parsing and caching - // HTTP Alt-Svc headers. This enables the use of HTTP/3 for origins that - // advertise supporting it. - // TODO(RyanTheOptimist): Make this field required when HTTP/3 is enabled. - config.core.v4alpha.AlternateProtocolsCacheOptions alternate_protocols_cache_options = 4; - } - - // This contains options common across HTTP/1 and HTTP/2 - config.core.v4alpha.HttpProtocolOptions common_http_protocol_options = 1; - - // This contains common protocol options which are only applied upstream. - config.core.v4alpha.UpstreamHttpProtocolOptions upstream_http_protocol_options = 2; - - // This controls the actual protocol to be used upstream. - oneof upstream_protocol_options { - option (validate.required) = true; - - // To explicitly configure either HTTP/1 or HTTP/2 (but not both!) use *explicit_http_config*. - // If the *explicit_http_config* is empty, HTTP/1.1 is used. - ExplicitHttpConfig explicit_http_config = 3; - - // This allows switching on protocol based on what protocol the downstream - // connection used. - UseDownstreamHttpConfig use_downstream_protocol_config = 4; - - // This allows switching on protocol based on ALPN - AutoHttpConfig auto_config = 5; - } -} diff --git a/generated_api_shadow/envoy/service/accesslog/v4alpha/BUILD b/generated_api_shadow/envoy/service/accesslog/v4alpha/BUILD deleted file mode 100644 index 94c70bc66967b..0000000000000 --- a/generated_api_shadow/envoy/service/accesslog/v4alpha/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/data/accesslog/v3:pkg", - "//envoy/service/accesslog/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/accesslog/v4alpha/als.proto b/generated_api_shadow/envoy/service/accesslog/v4alpha/als.proto deleted file mode 100644 index ab0ba0e15213e..0000000000000 --- a/generated_api_shadow/envoy/service/accesslog/v4alpha/als.proto +++ /dev/null @@ -1,87 +0,0 @@ -syntax = "proto3"; - -package envoy.service.accesslog.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/data/accesslog/v3/accesslog.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.accesslog.v4alpha"; -option java_outer_classname = "AlsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: gRPC Access Log Service (ALS)] - -// Service for streaming access logs from Envoy to an access log server. -service AccessLogService { - // Envoy will connect and send StreamAccessLogsMessage messages forever. It does not expect any - // response to be sent as nothing would be done in the case of failure. The server should - // disconnect if it expects Envoy to reconnect. In the future we may decide to add a different - // API for "critical" access logs in which Envoy will buffer access logs for some period of time - // until it gets an ACK so it could then retry. This API is designed for high throughput with the - // expectation that it might be lossy. 
- rpc StreamAccessLogs(stream StreamAccessLogsMessage) returns (StreamAccessLogsResponse) { - } -} - -// Empty response for the StreamAccessLogs API. Will never be sent. See below. -message StreamAccessLogsResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.accesslog.v3.StreamAccessLogsResponse"; -} - -// Stream message for the StreamAccessLogs API. Envoy will open a stream to the server and stream -// access logs without ever expecting a response. -message StreamAccessLogsMessage { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.accesslog.v3.StreamAccessLogsMessage"; - - message Identifier { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.accesslog.v3.StreamAccessLogsMessage.Identifier"; - - // The node sending the access log messages over the stream. - config.core.v4alpha.Node node = 1 [(validate.rules).message = {required: true}]; - - // The friendly name of the log configured in :ref:`CommonGrpcAccessLogConfig - // `. - string log_name = 2 [(validate.rules).string = {min_len: 1}]; - } - - // Wrapper for batches of HTTP access log entries. - message HTTPAccessLogEntries { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.accesslog.v3.StreamAccessLogsMessage.HTTPAccessLogEntries"; - - repeated data.accesslog.v3.HTTPAccessLogEntry log_entry = 1 - [(validate.rules).repeated = {min_items: 1}]; - } - - // Wrapper for batches of TCP access log entries. - message TCPAccessLogEntries { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.accesslog.v3.StreamAccessLogsMessage.TCPAccessLogEntries"; - - repeated data.accesslog.v3.TCPAccessLogEntry log_entry = 1 - [(validate.rules).repeated = {min_items: 1}]; - } - - // Identifier data that will only be sent in the first message on the stream. This is effectively - // structured metadata and is a performance optimization. 
- Identifier identifier = 1; - - // Batches of log entries of a single type. Generally speaking, a given stream should only - // ever include one type of log entry. - oneof log_entries { - option (validate.required) = true; - - HTTPAccessLogEntries http_logs = 2; - - TCPAccessLogEntries tcp_logs = 3; - } -} diff --git a/generated_api_shadow/envoy/service/auth/v4alpha/BUILD b/generated_api_shadow/envoy/service/auth/v4alpha/BUILD deleted file mode 100644 index 5a172e093202e..0000000000000 --- a/generated_api_shadow/envoy/service/auth/v4alpha/BUILD +++ /dev/null @@ -1,16 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/core/v4alpha:pkg", - "//envoy/service/auth/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/auth/v4alpha/attribute_context.proto b/generated_api_shadow/envoy/service/auth/v4alpha/attribute_context.proto deleted file mode 100644 index eed7a2e704ad0..0000000000000 --- a/generated_api_shadow/envoy/service/auth/v4alpha/attribute_context.proto +++ /dev/null @@ -1,177 +0,0 @@ -syntax = "proto3"; - -package envoy.service.auth.v4alpha; - -import "envoy/config/core/v4alpha/address.proto"; -import "envoy/config/core/v4alpha/base.proto"; - -import "google/protobuf/timestamp.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.auth.v4alpha"; -option java_outer_classname = "AttributeContextProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Attribute Context ] - -// See :ref:`network filter configuration overview ` -// and :ref:`HTTP 
filter configuration overview `. - -// An attribute is a piece of metadata that describes an activity on a network. -// For example, the size of an HTTP request, or the status code of an HTTP response. -// -// Each attribute has a type and a name, which is logically defined as a proto message field -// of the `AttributeContext`. The `AttributeContext` is a collection of individual attributes -// supported by Envoy authorization system. -// [#comment: The following items are left out of this proto -// Request.Auth field for jwt tokens -// Request.Api for api management -// Origin peer that originated the request -// Caching Protocol -// request_context return values to inject back into the filter chain -// peer.claims -- from X.509 extensions -// Configuration -// - field mask to send -// - which return values from request_context are copied back -// - which return values are copied into request_headers] -// [#next-free-field: 12] -message AttributeContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.auth.v3.AttributeContext"; - - // This message defines attributes for a node that handles a network request. - // The node can be either a service or an application that sends, forwards, - // or receives the request. Service peers should fill in the `service`, - // `principal`, and `labels` as appropriate. - // [#next-free-field: 6] - message Peer { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.auth.v3.AttributeContext.Peer"; - - // The address of the peer, this is typically the IP address. - // It can also be UDS path, or others. - config.core.v4alpha.Address address = 1; - - // The canonical service name of the peer. - // It should be set to :ref:`the HTTP x-envoy-downstream-service-cluster - // ` - // If a more trusted source of the service name is available through mTLS/secure naming, it - // should be used. - string service = 2; - - // The labels associated with the peer. 
- // These could be pod labels for Kubernetes or tags for VMs. - // The source of the labels could be an X.509 certificate or other configuration. - map labels = 3; - - // The authenticated identity of this peer. - // For example, the identity associated with the workload such as a service account. - // If an X.509 certificate is used to assert the identity this field should be sourced from - // `URI Subject Alternative Names`, `DNS Subject Alternate Names` or `Subject` in that order. - // The primary identity should be the principal. The principal format is issuer specific. - // - // Example: - // * SPIFFE format is `spiffe://trust-domain/path` - // * Google account format is `https://accounts.google.com/{userid}` - string principal = 4; - - // The X.509 certificate used to authenticate the identify of this peer. - // When present, the certificate contents are encoded in URL and PEM format. - string certificate = 5; - } - - // Represents a network request, such as an HTTP request. - message Request { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.auth.v3.AttributeContext.Request"; - - // The timestamp when the proxy receives the first byte of the request. - google.protobuf.Timestamp time = 1; - - // Represents an HTTP request or an HTTP-like request. - HttpRequest http = 2; - } - - // This message defines attributes for an HTTP request. - // HTTP/1.x, HTTP/2, gRPC are all considered as HTTP requests. - // [#next-free-field: 13] - message HttpRequest { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.auth.v3.AttributeContext.HttpRequest"; - - // The unique ID for a request, which can be propagated to downstream - // systems. The ID should have low probability of collision - // within a single day for a specific service. - // For HTTP requests, it should be X-Request-ID or equivalent. - string id = 1; - - // The HTTP request method, such as `GET`, `POST`. 
- string method = 2; - - // The HTTP request headers. If multiple headers share the same key, they - // must be merged according to the HTTP spec. All header keys must be - // lower-cased, because HTTP header keys are case-insensitive. - map headers = 3; - - // The request target, as it appears in the first line of the HTTP request. This includes - // the URL path and query-string. No decoding is performed. - string path = 4; - - // The HTTP request `Host` or 'Authority` header value. - string host = 5; - - // The HTTP URL scheme, such as `http` and `https`. - string scheme = 6; - - // This field is always empty, and exists for compatibility reasons. The HTTP URL query is - // included in `path` field. - string query = 7; - - // This field is always empty, and exists for compatibility reasons. The URL fragment is - // not submitted as part of HTTP requests; it is unknowable. - string fragment = 8; - - // The HTTP request size in bytes. If unknown, it must be -1. - int64 size = 9; - - // The network protocol used with the request, such as "HTTP/1.0", "HTTP/1.1", or "HTTP/2". - // - // See :repo:`headers.h:ProtocolStrings ` for a list of all - // possible values. - string protocol = 10; - - // The HTTP request body. - string body = 11; - - // The HTTP request body in bytes. This is used instead of - // :ref:`body ` when - // :ref:`pack_as_bytes ` - // is set to true. - bytes raw_body = 12; - } - - // The source of a network activity, such as starting a TCP connection. - // In a multi hop network activity, the source represents the sender of the - // last hop. - Peer source = 1; - - // The destination of a network activity, such as accepting a TCP connection. - // In a multi hop network activity, the destination represents the receiver of - // the last hop. - Peer destination = 2; - - // Represents a network request, such as an HTTP request. 
- Request request = 4; - - // This is analogous to http_request.headers, however these contents will not be sent to the - // upstream server. Context_extensions provide an extension mechanism for sending additional - // information to the auth server without modifying the proto definition. It maps to the - // internal opaque context in the filter chain. - map context_extensions = 10; - - // Dynamic metadata associated with the request. - config.core.v4alpha.Metadata metadata_context = 11; -} diff --git a/generated_api_shadow/envoy/service/auth/v4alpha/external_auth.proto b/generated_api_shadow/envoy/service/auth/v4alpha/external_auth.proto deleted file mode 100644 index f2a2cfe6c61c8..0000000000000 --- a/generated_api_shadow/envoy/service/auth/v4alpha/external_auth.proto +++ /dev/null @@ -1,134 +0,0 @@ -syntax = "proto3"; - -package envoy.service.auth.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/service/auth/v4alpha/attribute_context.proto"; -import "envoy/type/v3/http_status.proto"; - -import "google/protobuf/struct.proto"; -import "google/rpc/status.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.auth.v4alpha"; -option java_outer_classname = "ExternalAuthProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Authorization Service ] - -// The authorization service request messages used by external authorization :ref:`network filter -// ` and :ref:`HTTP filter `. - -// A generic interface for performing authorization check on incoming -// requests to a networked service. 
-service Authorization { - // Performs authorization check based on the attributes associated with the - // incoming request, and returns status `OK` or not `OK`. - rpc Check(CheckRequest) returns (CheckResponse) { - } -} - -message CheckRequest { - option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v3.CheckRequest"; - - // The request attributes. - AttributeContext attributes = 1; -} - -// HTTP attributes for a denied response. -message DeniedHttpResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.auth.v3.DeniedHttpResponse"; - - // This field allows the authorization service to send a HTTP response status - // code to the downstream client other than 403 (Forbidden). - type.v3.HttpStatus status = 1 [(validate.rules).message = {required: true}]; - - // This field allows the authorization service to send HTTP response headers - // to the downstream client. Note that the :ref:`append field in HeaderValueOption ` defaults to - // false when used in this message. - repeated config.core.v4alpha.HeaderValueOption headers = 2; - - // This field allows the authorization service to send a response body data - // to the downstream client. - string body = 3; -} - -// HTTP attributes for an OK response. -// [#next-free-field: 7] -message OkHttpResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.auth.v3.OkHttpResponse"; - - // HTTP entity headers in addition to the original request headers. This allows the authorization - // service to append, to add or to override headers from the original request before - // dispatching it to the upstream. Note that the :ref:`append field in HeaderValueOption ` defaults to - // false when used in this message. By setting the `append` field to `true`, - // the filter will append the correspondent header value to the matched request header. 
- // By leaving `append` as false, the filter will either add a new header, or override an existing - // one if there is a match. - repeated config.core.v4alpha.HeaderValueOption headers = 2; - - // HTTP entity headers to remove from the original request before dispatching - // it to the upstream. This allows the authorization service to act on auth - // related headers (like `Authorization`), process them, and consume them. - // Under this model, the upstream will either receive the request (if it's - // authorized) or not receive it (if it's not), but will not see headers - // containing authorization credentials. - // - // Pseudo headers (such as `:authority`, `:method`, `:path` etc), as well as - // the header `Host`, may not be removed as that would make the request - // malformed. If mentioned in `headers_to_remove` these special headers will - // be ignored. - // - // When using the HTTP service this must instead be set by the HTTP - // authorization service as a comma separated list like so: - // ``x-envoy-auth-headers-to-remove: one-auth-header, another-auth-header``. - repeated string headers_to_remove = 5; - - // This field has been deprecated in favor of :ref:`CheckResponse.dynamic_metadata - // `. Until it is removed, - // setting this field overrides :ref:`CheckResponse.dynamic_metadata - // `. - google.protobuf.Struct hidden_envoy_deprecated_dynamic_metadata = 3 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // This field allows the authorization service to send HTTP response headers - // to the downstream client on success. Note that the :ref:`append field in HeaderValueOption ` - // defaults to false when used in this message. - repeated config.core.v4alpha.HeaderValueOption response_headers_to_add = 6; -} - -// Intended for gRPC and Network Authorization servers `only`. 
-message CheckResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.auth.v3.CheckResponse"; - - // Status `OK` allows the request. Any other status indicates the request should be denied. - google.rpc.Status status = 1; - - // An message that contains HTTP response attributes. This message is - // used when the authorization service needs to send custom responses to the - // downstream client or, to modify/add request headers being dispatched to the upstream. - oneof http_response { - // Supplies http attributes for a denied response. - DeniedHttpResponse denied_response = 2; - - // Supplies http attributes for an ok response. - OkHttpResponse ok_response = 3; - } - - // Optional response metadata that will be emitted as dynamic metadata to be consumed by the next - // filter. This metadata lives in a namespace specified by the canonical name of extension filter - // that requires it: - // - // - :ref:`envoy.filters.http.ext_authz ` for HTTP filter. - // - :ref:`envoy.filters.network.ext_authz ` for network filter. - google.protobuf.Struct dynamic_metadata = 4; -} diff --git a/generated_api_shadow/envoy/service/discovery/v4alpha/BUILD b/generated_api_shadow/envoy/service/discovery/v4alpha/BUILD deleted file mode 100644 index 2de065dc5b393..0000000000000 --- a/generated_api_shadow/envoy/service/discovery/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/service/discovery/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/discovery/v4alpha/ads.proto b/generated_api_shadow/envoy/service/discovery/v4alpha/ads.proto deleted file mode 100644 index 41435811bd17f..0000000000000 --- a/generated_api_shadow/envoy/service/discovery/v4alpha/ads.proto +++ /dev/null @@ -1,44 +0,0 @@ -syntax = "proto3"; - -package envoy.service.discovery.v4alpha; - -import "envoy/service/discovery/v4alpha/discovery.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.discovery.v4alpha"; -option java_outer_classname = "AdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Aggregated Discovery Service (ADS)] - -// [#not-implemented-hide:] Discovery services for endpoints, clusters, routes, -// and listeners are retained in the package `envoy.api.v2` for backwards -// compatibility with existing management servers. New development in discovery -// services should proceed in the package `envoy.service.discovery.v2`. - -// See https://github.com/lyft/envoy-api#apis for a description of the role of -// ADS and how it is intended to be used by a management server. ADS requests -// have the same structure as their singleton xDS counterparts, but can -// multiplex many resource types on a single stream. The type_url in the -// DiscoveryRequest/DiscoveryResponse provides sufficient information to recover -// the multiplexed singleton APIs at the Envoy instance and management server. 
-service AggregatedDiscoveryService { - // This is a gRPC-only API. - rpc StreamAggregatedResources(stream DiscoveryRequest) returns (stream DiscoveryResponse) { - } - - rpc DeltaAggregatedResources(stream DeltaDiscoveryRequest) - returns (stream DeltaDiscoveryResponse) { - } -} - -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 -message AdsDummy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v3.AdsDummy"; -} diff --git a/generated_api_shadow/envoy/service/discovery/v4alpha/discovery.proto b/generated_api_shadow/envoy/service/discovery/v4alpha/discovery.proto deleted file mode 100644 index bf8d48fc7a374..0000000000000 --- a/generated_api_shadow/envoy/service/discovery/v4alpha/discovery.proto +++ /dev/null @@ -1,286 +0,0 @@ -syntax = "proto3"; - -package envoy.service.discovery.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/rpc/status.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.discovery.v4alpha"; -option java_outer_classname = "DiscoveryProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Common discovery API components] - -// A DiscoveryRequest requests a set of versioned resources of the same type for -// a given Envoy node on some API. -// [#next-free-field: 7] -message DiscoveryRequest { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v3.DiscoveryRequest"; - - // The version_info provided in the request messages will be the version_info - // received with the most recent successfully processed response or empty on - // the first request. 
It is expected that no new request is sent after a - // response is received until the Envoy instance is ready to ACK/NACK the new - // configuration. ACK/NACK takes place by returning the new API config version - // as applied or the previous API config version respectively. Each type_url - // (see below) has an independent version associated with it. - string version_info = 1; - - // The node making the request. - config.core.v4alpha.Node node = 2; - - // List of resources to subscribe to, e.g. list of cluster names or a route - // configuration name. If this is empty, all resources for the API are - // returned. LDS/CDS may have empty resource_names, which will cause all - // resources for the Envoy instance to be returned. The LDS and CDS responses - // will then imply a number of resources that need to be fetched via EDS/RDS, - // which will be explicitly enumerated in resource_names. - repeated string resource_names = 3; - - // Type of the resource that is being requested, e.g. - // "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". This is implicit - // in requests made via singleton xDS APIs such as CDS, LDS, etc. but is - // required for ADS. - string type_url = 4; - - // nonce corresponding to DiscoveryResponse being ACK/NACKed. See above - // discussion on version_info and the DiscoveryResponse nonce comment. This - // may be empty only if 1) this is a non-persistent-stream xDS such as HTTP, - // or 2) the client has not yet accepted an update in this xDS stream (unlike - // delta, where it is populated only for new explicit ACKs). - string response_nonce = 5; - - // This is populated when the previous :ref:`DiscoveryResponse ` - // failed to update configuration. The *message* field in *error_details* provides the Envoy - // internal exception related to the failure. It is only intended for consumption during manual - // debugging, the string provided is not guaranteed to be stable across Envoy versions. 
- google.rpc.Status error_detail = 6; -} - -// [#next-free-field: 7] -message DiscoveryResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v3.DiscoveryResponse"; - - // The version of the response data. - string version_info = 1; - - // The response resources. These resources are typed and depend on the API being called. - repeated google.protobuf.Any resources = 2; - - // [#not-implemented-hide:] - // Canary is used to support two Envoy command line flags: - // - // * --terminate-on-canary-transition-failure. When set, Envoy is able to - // terminate if it detects that configuration is stuck at canary. Consider - // this example sequence of updates: - // - Management server applies a canary config successfully. - // - Management server rolls back to a production config. - // - Envoy rejects the new production config. - // Since there is no sensible way to continue receiving configuration - // updates, Envoy will then terminate and apply production config from a - // clean slate. - // * --dry-run-canary. When set, a canary response will never be applied, only - // validated via a dry run. - bool canary = 3; - - // Type URL for resources. Identifies the xDS API when muxing over ADS. - // Must be consistent with the type_url in the 'resources' repeated Any (if non-empty). - string type_url = 4; - - // For gRPC based subscriptions, the nonce provides a way to explicitly ack a - // specific DiscoveryResponse in a following DiscoveryRequest. Additional - // messages may have been sent by Envoy to the management server for the - // previous version on the stream prior to this DiscoveryResponse, that were - // unprocessed at response send time. The nonce allows the management server - // to ignore any further DiscoveryRequests for the previous version until a - // DiscoveryRequest bearing the nonce. The nonce is optional and is not - // required for non-stream based xDS implementations. 
- string nonce = 5; - - // The control plane instance that sent the response. - config.core.v4alpha.ControlPlane control_plane = 6; -} - -// DeltaDiscoveryRequest and DeltaDiscoveryResponse are used in a new gRPC -// endpoint for Delta xDS. -// -// With Delta xDS, the DeltaDiscoveryResponses do not need to include a full -// snapshot of the tracked resources. Instead, DeltaDiscoveryResponses are a -// diff to the state of a xDS client. -// In Delta XDS there are per-resource versions, which allow tracking state at -// the resource granularity. -// An xDS Delta session is always in the context of a gRPC bidirectional -// stream. This allows the xDS server to keep track of the state of xDS clients -// connected to it. -// -// In Delta xDS the nonce field is required and used to pair -// DeltaDiscoveryResponse to a DeltaDiscoveryRequest ACK or NACK. -// Optionally, a response message level system_version_info is present for -// debugging purposes only. -// -// DeltaDiscoveryRequest plays two independent roles. Any DeltaDiscoveryRequest -// can be either or both of: [1] informing the server of what resources the -// client has gained/lost interest in (using resource_names_subscribe and -// resource_names_unsubscribe), or [2] (N)ACKing an earlier resource update from -// the server (using response_nonce, with presence of error_detail making it a NACK). -// Additionally, the first message (for a given type_url) of a reconnected gRPC stream -// has a third role: informing the server of the resources (and their versions) -// that the client already possesses, using the initial_resource_versions field. -// -// As with state-of-the-world, when multiple resource types are multiplexed (ADS), -// all requests/acknowledgments/updates are logically walled off by type_url: -// a Cluster ACK exists in a completely separate world from a prior Route NACK. 
-// In particular, initial_resource_versions being sent at the "start" of every -// gRPC stream actually entails a message for each type_url, each with its own -// initial_resource_versions. -// [#next-free-field: 8] -message DeltaDiscoveryRequest { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v3.DeltaDiscoveryRequest"; - - // The node making the request. - config.core.v4alpha.Node node = 1; - - // Type of the resource that is being requested, e.g. - // "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". This does not need to be set if - // resources are only referenced via *xds_resource_subscribe* and - // *xds_resources_unsubscribe*. - string type_url = 2; - - // DeltaDiscoveryRequests allow the client to add or remove individual - // resources to the set of tracked resources in the context of a stream. - // All resource names in the resource_names_subscribe list are added to the - // set of tracked resources and all resource names in the resource_names_unsubscribe - // list are removed from the set of tracked resources. - // - // *Unlike* state-of-the-world xDS, an empty resource_names_subscribe or - // resource_names_unsubscribe list simply means that no resources are to be - // added or removed to the resource list. - // *Like* state-of-the-world xDS, the server must send updates for all tracked - // resources, but can also send updates for resources the client has not subscribed to. - // - // NOTE: the server must respond with all resources listed in resource_names_subscribe, - // even if it believes the client has the most recent version of them. The reason: - // the client may have dropped them, but then regained interest before it had a chance - // to send the unsubscribe message. See DeltaSubscriptionStateTest.RemoveThenAdd. - // - // These two fields can be set in any DeltaDiscoveryRequest, including ACKs - // and initial_resource_versions. 
- // - // A list of Resource names to add to the list of tracked resources. - repeated string resource_names_subscribe = 3; - - // A list of Resource names to remove from the list of tracked resources. - repeated string resource_names_unsubscribe = 4; - - // Informs the server of the versions of the resources the xDS client knows of, to enable the - // client to continue the same logical xDS session even in the face of gRPC stream reconnection. - // It will not be populated: [1] in the very first stream of a session, since the client will - // not yet have any resources, [2] in any message after the first in a stream (for a given - // type_url), since the server will already be correctly tracking the client's state. - // (In ADS, the first message *of each type_url* of a reconnected stream populates this map.) - // The map's keys are names of xDS resources known to the xDS client. - // The map's values are opaque resource versions. - map initial_resource_versions = 5; - - // When the DeltaDiscoveryRequest is a ACK or NACK message in response - // to a previous DeltaDiscoveryResponse, the response_nonce must be the - // nonce in the DeltaDiscoveryResponse. - // Otherwise (unlike in DiscoveryRequest) response_nonce must be omitted. - string response_nonce = 6; - - // This is populated when the previous :ref:`DiscoveryResponse ` - // failed to update configuration. The *message* field in *error_details* - // provides the Envoy internal exception related to the failure. - google.rpc.Status error_detail = 7; -} - -// [#next-free-field: 8] -message DeltaDiscoveryResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v3.DeltaDiscoveryResponse"; - - // The version of the response data (used for debugging). - string system_version_info = 1; - - // The response resources. These are typed resources, whose types must match - // the type_url field. - repeated Resource resources = 2; - - // field id 3 IS available! 
- - // Type URL for resources. Identifies the xDS API when muxing over ADS. - // Must be consistent with the type_url in the Any within 'resources' if 'resources' is non-empty. - string type_url = 4; - - // Resources names of resources that have be deleted and to be removed from the xDS Client. - // Removed resources for missing resources can be ignored. - repeated string removed_resources = 6; - - // The nonce provides a way for DeltaDiscoveryRequests to uniquely - // reference a DeltaDiscoveryResponse when (N)ACKing. The nonce is required. - string nonce = 5; - - // [#not-implemented-hide:] - // The control plane instance that sent the response. - config.core.v4alpha.ControlPlane control_plane = 7; -} - -// [#next-free-field: 8] -message Resource { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v3.Resource"; - - // Cache control properties for the resource. - // [#not-implemented-hide:] - message CacheControl { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v3.Resource.CacheControl"; - - // If true, xDS proxies may not cache this resource. - // Note that this does not apply to clients other than xDS proxies, which must cache resources - // for their own use, regardless of the value of this field. - bool do_not_cache = 1; - } - - // The resource's name, to distinguish it from others of the same type of resource. - string name = 3; - - // The aliases are a list of other names that this resource can go by. - repeated string aliases = 4; - - // The resource level version. It allows xDS to track the state of individual - // resources. - string version = 1; - - // The resource being tracked. - google.protobuf.Any resource = 2; - - // Time-to-live value for the resource. For each resource, a timer is started. The timer is - // reset each time the resource is received with a new TTL. If the resource is received with - // no TTL set, the timer is removed for the resource. 
Upon expiration of the timer, the - // configuration for the resource will be removed. - // - // The TTL can be refreshed or changed by sending a response that doesn't change the resource - // version. In this case the resource field does not need to be populated, which allows for - // light-weight "heartbeat" updates to keep a resource with a TTL alive. - // - // The TTL feature is meant to support configurations that should be removed in the event of - // a management server failure. For example, the feature may be used for fault injection - // testing where the fault injection should be terminated in the event that Envoy loses contact - // with the management server. - google.protobuf.Duration ttl = 6; - - // Cache control properties for the resource. - // [#not-implemented-hide:] - CacheControl cache_control = 7; -} diff --git a/generated_api_shadow/envoy/service/endpoint/v3/leds.proto b/generated_api_shadow/envoy/service/endpoint/v3/leds.proto new file mode 100644 index 0000000000000..89172f487eba0 --- /dev/null +++ b/generated_api_shadow/envoy/service/endpoint/v3/leds.proto @@ -0,0 +1,37 @@ +syntax = "proto3"; + +package envoy.service.endpoint.v3; + +import "envoy/service/discovery/v3/discovery.proto"; + +import "envoy/annotations/resource.proto"; +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.service.endpoint.v3"; +option java_outer_classname = "LedsProto"; +option java_multiple_files = true; +option java_generic_services = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#not-implemented-hide:] +// [#protodoc-title: LEDS] +// Locality-Endpoint discovery +// [#comment:TODO(adisuissa): Link to unified matching docs: +// :ref:`architecture overview`] + +service LocalityEndpointDiscoveryService { + option (envoy.annotations.resource).type = "envoy.config.endpoint.v3.LbEndpoint"; + + // State-of-the-World (DiscoveryRequest) and REST are not supported. 
+ + // The resource_names_subscribe resource_names_unsubscribe fields in DeltaDiscoveryRequest + // specify a list of glob collections to subscribe to updates for. + rpc DeltaLocalityEndpoints(stream discovery.v3.DeltaDiscoveryRequest) + returns (stream discovery.v3.DeltaDiscoveryResponse) { + } +} + +// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing +// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. +message LedsDummy { +} diff --git a/generated_api_shadow/envoy/service/event_reporting/v4alpha/BUILD b/generated_api_shadow/envoy/service/event_reporting/v4alpha/BUILD deleted file mode 100644 index 7f342132a86d9..0000000000000 --- a/generated_api_shadow/envoy/service/event_reporting/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/service/event_reporting/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/event_reporting/v4alpha/event_reporting_service.proto b/generated_api_shadow/envoy/service/event_reporting/v4alpha/event_reporting_service.proto deleted file mode 100644 index 6bff2a09c25ba..0000000000000 --- a/generated_api_shadow/envoy/service/event_reporting/v4alpha/event_reporting_service.proto +++ /dev/null @@ -1,69 +0,0 @@ -syntax = "proto3"; - -package envoy.service.event_reporting.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.event_reporting.v4alpha"; -option java_outer_classname = 
"EventReportingServiceProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: gRPC Event Reporting Service] - -// [#not-implemented-hide:] -// Service for streaming different types of events from Envoy to a server. The examples of -// such events may be health check or outlier detection events. -service EventReportingService { - // Envoy will connect and send StreamEventsRequest messages forever. - // The management server may send StreamEventsResponse to configure event stream. See below. - // This API is designed for high throughput with the expectation that it might be lossy. - rpc StreamEvents(stream StreamEventsRequest) returns (stream StreamEventsResponse) { - } -} - -// [#not-implemented-hide:] -// An events envoy sends to the management server. -message StreamEventsRequest { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.event_reporting.v3.StreamEventsRequest"; - - message Identifier { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.event_reporting.v3.StreamEventsRequest.Identifier"; - - // The node sending the event messages over the stream. - config.core.v4alpha.Node node = 1 [(validate.rules).message = {required: true}]; - } - - // Identifier data that will only be sent in the first message on the stream. This is effectively - // structured metadata and is a performance optimization. - Identifier identifier = 1; - - // Batch of events. When the stream is already active, it will be the events occurred - // since the last message had been sent. If the server receives unknown event type, it should - // silently ignore it. 
- // - // The following events are supported: - // - // * :ref:`HealthCheckEvent ` - // * :ref:`OutlierDetectionEvent ` - repeated google.protobuf.Any events = 2 [(validate.rules).repeated = {min_items: 1}]; -} - -// [#not-implemented-hide:] -// The management server may send envoy a StreamEventsResponse to tell which events the server -// is interested in. In future, with aggregated event reporting service, this message will -// contain, for example, clusters the envoy should send events for, or event types the server -// wants to process. -message StreamEventsResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.event_reporting.v3.StreamEventsResponse"; -} diff --git a/generated_api_shadow/envoy/service/health/v3/hds.proto b/generated_api_shadow/envoy/service/health/v3/hds.proto index bb8781d5c3958..51266a64fa959 100644 --- a/generated_api_shadow/envoy/service/health/v3/hds.proto +++ b/generated_api_shadow/envoy/service/health/v3/hds.proto @@ -186,3 +186,8 @@ message HealthCheckSpecifier { // The default is 1 second. google.protobuf.Duration interval = 2; } + +// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing +// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. +message HdsDummy { +} diff --git a/generated_api_shadow/envoy/service/health/v4alpha/BUILD b/generated_api_shadow/envoy/service/health/v4alpha/BUILD deleted file mode 100644 index 37c2608f7c182..0000000000000 --- a/generated_api_shadow/envoy/service/health/v4alpha/BUILD +++ /dev/null @@ -1,17 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/cluster/v4alpha:pkg", - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/endpoint/v3:pkg", - "//envoy/service/health/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/health/v4alpha/hds.proto b/generated_api_shadow/envoy/service/health/v4alpha/hds.proto deleted file mode 100644 index 1b2446b109d8b..0000000000000 --- a/generated_api_shadow/envoy/service/health/v4alpha/hds.proto +++ /dev/null @@ -1,193 +0,0 @@ -syntax = "proto3"; - -package envoy.service.health.v4alpha; - -import "envoy/config/cluster/v4alpha/cluster.proto"; -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/core/v4alpha/health_check.proto"; -import "envoy/config/endpoint/v3/endpoint_components.proto"; - -import "google/api/annotations.proto"; -import "google/protobuf/duration.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.health.v4alpha"; -option java_outer_classname = "HdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Health Discovery Service (HDS)] - -// HDS is Health Discovery Service. It compliments Envoy’s health checking -// service by designating this Envoy to be a healthchecker for a subset of hosts -// in the cluster. The status of these health checks will be reported to the -// management server, where it can be aggregated etc and redistributed back to -// Envoy through EDS. -service HealthDiscoveryService { - // 1. 
Envoy starts up and if its can_healthcheck option in the static - // bootstrap config is enabled, sends HealthCheckRequest to the management - // server. It supplies its capabilities (which protocol it can health check - // with, what zone it resides in, etc.). - // 2. In response to (1), the management server designates this Envoy as a - // healthchecker to health check a subset of all upstream hosts for a given - // cluster (for example upstream Host 1 and Host 2). It streams - // HealthCheckSpecifier messages with cluster related configuration for all - // clusters this Envoy is designated to health check. Subsequent - // HealthCheckSpecifier message will be sent on changes to: - // a. Endpoints to health checks - // b. Per cluster configuration change - // 3. Envoy creates a health probe based on the HealthCheck config and sends - // it to endpoint(ip:port) of Host 1 and 2. Based on the HealthCheck - // configuration Envoy waits upon the arrival of the probe response and - // looks at the content of the response to decide whether the endpoint is - // healthy or not. If a response hasn't been received within the timeout - // interval, the endpoint health status is considered TIMEOUT. - // 4. Envoy reports results back in an EndpointHealthResponse message. - // Envoy streams responses as often as the interval configured by the - // management server in HealthCheckSpecifier. - // 5. The management Server collects health statuses for all endpoints in the - // cluster (for all clusters) and uses this information to construct - // EndpointDiscoveryResponse messages. - // 6. Once Envoy has a list of upstream endpoints to send traffic to, it load - // balances traffic to them without additional health checking. It may - // use inline healthcheck (i.e. consider endpoint UNHEALTHY if connection - // failed to a particular endpoint to account for health status propagation - // delay between HDS and EDS). - // By default, can_healthcheck is true. 
If can_healthcheck is false, Cluster - // configuration may not contain HealthCheck message. - // TODO(htuch): How is can_healthcheck communicated to CDS to ensure the above - // invariant? - // TODO(htuch): Add @amb67's diagram. - rpc StreamHealthCheck(stream HealthCheckRequestOrEndpointHealthResponse) - returns (stream HealthCheckSpecifier) { - } - - // TODO(htuch): Unlike the gRPC version, there is no stream-based binding of - // request/response. Should we add an identifier to the HealthCheckSpecifier - // to bind with the response? - rpc FetchHealthCheck(HealthCheckRequestOrEndpointHealthResponse) returns (HealthCheckSpecifier) { - option (google.api.http).post = "/v3/discovery:health_check"; - option (google.api.http).body = "*"; - } -} - -// Defines supported protocols etc, so the management server can assign proper -// endpoints to healthcheck. -message Capability { - option (udpa.annotations.versioning).previous_message_type = "envoy.service.health.v3.Capability"; - - // Different Envoy instances may have different capabilities (e.g. Redis) - // and/or have ports enabled for different protocols. - enum Protocol { - HTTP = 0; - TCP = 1; - REDIS = 2; - } - - repeated Protocol health_check_protocols = 1; -} - -message HealthCheckRequest { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.health.v3.HealthCheckRequest"; - - config.core.v4alpha.Node node = 1; - - Capability capability = 2; -} - -message EndpointHealth { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.health.v3.EndpointHealth"; - - config.endpoint.v3.Endpoint endpoint = 1; - - config.core.v4alpha.HealthStatus health_status = 2; -} - -// Group endpoint health by locality under each cluster. 
-message LocalityEndpointsHealth { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.health.v3.LocalityEndpointsHealth"; - - config.core.v4alpha.Locality locality = 1; - - repeated EndpointHealth endpoints_health = 2; -} - -// The health status of endpoints in a cluster. The cluster name and locality -// should match the corresponding fields in ClusterHealthCheck message. -message ClusterEndpointsHealth { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.health.v3.ClusterEndpointsHealth"; - - string cluster_name = 1; - - repeated LocalityEndpointsHealth locality_endpoints_health = 2; -} - -message EndpointHealthResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.health.v3.EndpointHealthResponse"; - - // Deprecated - Flat list of endpoint health information. - repeated EndpointHealth hidden_envoy_deprecated_endpoints_health = 1 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Organize Endpoint health information by cluster. - repeated ClusterEndpointsHealth cluster_endpoints_health = 2; -} - -message HealthCheckRequestOrEndpointHealthResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.health.v3.HealthCheckRequestOrEndpointHealthResponse"; - - oneof request_type { - HealthCheckRequest health_check_request = 1; - - EndpointHealthResponse endpoint_health_response = 2; - } -} - -message LocalityEndpoints { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.health.v3.LocalityEndpoints"; - - config.core.v4alpha.Locality locality = 1; - - repeated config.endpoint.v3.Endpoint endpoints = 2; -} - -// The cluster name and locality is provided to Envoy for the endpoints that it -// health checks to support statistics reporting, logging and debugging by the -// Envoy instance (outside of HDS). 
For maximum usefulness, it should match the -// same cluster structure as that provided by EDS. -message ClusterHealthCheck { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.health.v3.ClusterHealthCheck"; - - string cluster_name = 1; - - repeated config.core.v4alpha.HealthCheck health_checks = 2; - - repeated LocalityEndpoints locality_endpoints = 3; - - // Optional map that gets filtered by :ref:`health_checks.transport_socket_match_criteria ` - // on connection when health checking. For more details, see - // :ref:`config.cluster.v3.Cluster.transport_socket_matches `. - repeated config.cluster.v4alpha.Cluster.TransportSocketMatch transport_socket_matches = 4; -} - -message HealthCheckSpecifier { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.health.v3.HealthCheckSpecifier"; - - repeated ClusterHealthCheck cluster_health_checks = 1; - - // The default is 1 second. - google.protobuf.Duration interval = 2; -} diff --git a/generated_api_shadow/envoy/service/load_stats/v4alpha/BUILD b/generated_api_shadow/envoy/service/load_stats/v4alpha/BUILD deleted file mode 100644 index 91d914645041b..0000000000000 --- a/generated_api_shadow/envoy/service/load_stats/v4alpha/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/config/endpoint/v3:pkg", - "//envoy/service/load_stats/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/load_stats/v4alpha/lrs.proto b/generated_api_shadow/envoy/service/load_stats/v4alpha/lrs.proto deleted file mode 100644 index f99b6555f4a17..0000000000000 --- a/generated_api_shadow/envoy/service/load_stats/v4alpha/lrs.proto +++ /dev/null @@ -1,102 +0,0 @@ -syntax = "proto3"; - -package envoy.service.load_stats.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/config/endpoint/v3/load_report.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.load_stats.v4alpha"; -option java_outer_classname = "LrsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Load Reporting service (LRS)] - -// Load Reporting Service is an Envoy API to emit load reports. Envoy will initiate a bi-directional -// stream with a management server. Upon connecting, the management server can send a -// :ref:`LoadStatsResponse ` to a node it is -// interested in getting the load reports for. Envoy in this node will start sending -// :ref:`LoadStatsRequest `. This is done periodically -// based on the :ref:`load reporting interval ` -// For details, take a look at the :ref:`Load Reporting Service sandbox example `. - -service LoadReportingService { - // Advanced API to allow for multi-dimensional load balancing by remote - // server. 
For receiving LB assignments, the steps are: - // 1, The management server is configured with per cluster/zone/load metric - // capacity configuration. The capacity configuration definition is - // outside of the scope of this document. - // 2. Envoy issues a standard {Stream,Fetch}Endpoints request for the clusters - // to balance. - // - // Independently, Envoy will initiate a StreamLoadStats bidi stream with a - // management server: - // 1. Once a connection establishes, the management server publishes a - // LoadStatsResponse for all clusters it is interested in learning load - // stats about. - // 2. For each cluster, Envoy load balances incoming traffic to upstream hosts - // based on per-zone weights and/or per-instance weights (if specified) - // based on intra-zone LbPolicy. This information comes from the above - // {Stream,Fetch}Endpoints. - // 3. When upstream hosts reply, they optionally add header with ASCII representation of EndpointLoadMetricStats. - // 4. Envoy aggregates load reports over the period of time given to it in - // LoadStatsResponse.load_reporting_interval. This includes aggregation - // stats Envoy maintains by itself (total_requests, rpc_errors etc.) as - // well as load metrics from upstream hosts. - // 5. When the timer of load_reporting_interval expires, Envoy sends new - // LoadStatsRequest filled with load reports for each cluster. - // 6. The management server uses the load reports from all reported Envoys - // from around the world, computes global assignment and prepares traffic - // assignment destined for each zone Envoys are located in. Goto 2. - rpc StreamLoadStats(stream LoadStatsRequest) returns (stream LoadStatsResponse) { - } -} - -// A load report Envoy sends to the management server. -message LoadStatsRequest { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.load_stats.v3.LoadStatsRequest"; - - // Node identifier for Envoy instance. 
- config.core.v4alpha.Node node = 1; - - // A list of load stats to report. - repeated config.endpoint.v3.ClusterStats cluster_stats = 2; -} - -// The management server sends envoy a LoadStatsResponse with all clusters it -// is interested in learning load stats about. -message LoadStatsResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.load_stats.v3.LoadStatsResponse"; - - // Clusters to report stats for. - // Not populated if *send_all_clusters* is true. - repeated string clusters = 1; - - // If true, the client should send all clusters it knows about. - // Only clients that advertise the "envoy.lrs.supports_send_all_clusters" capability in their - // :ref:`client_features` field will honor this field. - bool send_all_clusters = 4; - - // The minimum interval of time to collect stats over. This is only a minimum for two reasons: - // - // 1. There may be some delay from when the timer fires until stats sampling occurs. - // 2. For clusters that were already feature in the previous *LoadStatsResponse*, any traffic - // that is observed in between the corresponding previous *LoadStatsRequest* and this - // *LoadStatsResponse* will also be accumulated and billed to the cluster. This avoids a period - // of inobservability that might otherwise exists between the messages. New clusters are not - // subject to this consideration. - google.protobuf.Duration load_reporting_interval = 2; - - // Set to *true* if the management server supports endpoint granularity - // report. - bool report_endpoint_granularity = 3; -} diff --git a/generated_api_shadow/envoy/service/metrics/v4alpha/BUILD b/generated_api_shadow/envoy/service/metrics/v4alpha/BUILD deleted file mode 100644 index 285d31cf31d46..0000000000000 --- a/generated_api_shadow/envoy/service/metrics/v4alpha/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/service/metrics/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@prometheus_metrics_model//:client_model", - ], -) diff --git a/generated_api_shadow/envoy/service/metrics/v4alpha/metrics_service.proto b/generated_api_shadow/envoy/service/metrics/v4alpha/metrics_service.proto deleted file mode 100644 index 5e1412f103e93..0000000000000 --- a/generated_api_shadow/envoy/service/metrics/v4alpha/metrics_service.proto +++ /dev/null @@ -1,53 +0,0 @@ -syntax = "proto3"; - -package envoy.service.metrics.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; - -import "io/prometheus/client/metrics.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.metrics.v4alpha"; -option java_outer_classname = "MetricsServiceProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Metrics service] - -// Service for streaming metrics to server that consumes the metrics data. It uses Prometheus metric -// data model as a standard to represent metrics information. -service MetricsService { - // Envoy will connect and send StreamMetricsMessage messages forever. It does not expect any - // response to be sent as nothing would be done in the case of failure. 
- rpc StreamMetrics(stream StreamMetricsMessage) returns (StreamMetricsResponse) { - } -} - -message StreamMetricsResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.metrics.v3.StreamMetricsResponse"; -} - -message StreamMetricsMessage { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.metrics.v3.StreamMetricsMessage"; - - message Identifier { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.metrics.v3.StreamMetricsMessage.Identifier"; - - // The node sending metrics over the stream. - config.core.v4alpha.Node node = 1 [(validate.rules).message = {required: true}]; - } - - // Identifier data effectively is a structured metadata. As a performance optimization this will - // only be sent in the first message on the stream. - Identifier identifier = 1; - - // A list of metric entries - repeated io.prometheus.client.MetricFamily envoy_metrics = 2; -} diff --git a/generated_api_shadow/envoy/service/status/v4alpha/BUILD b/generated_api_shadow/envoy/service/status/v4alpha/BUILD deleted file mode 100644 index 7c365494828dd..0000000000000 --- a/generated_api_shadow/envoy/service/status/v4alpha/BUILD +++ /dev/null @@ -1,17 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/admin/v4alpha:pkg", - "//envoy/annotations:pkg", - "//envoy/config/core/v4alpha:pkg", - "//envoy/service/status/v3:pkg", - "//envoy/type/matcher/v4alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/status/v4alpha/csds.proto b/generated_api_shadow/envoy/service/status/v4alpha/csds.proto deleted file mode 100644 index 8a47045546f7f..0000000000000 --- a/generated_api_shadow/envoy/service/status/v4alpha/csds.proto +++ /dev/null @@ -1,194 +0,0 @@ -syntax = "proto3"; - -package envoy.service.status.v4alpha; - -import "envoy/admin/v4alpha/config_dump.proto"; -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/type/matcher/v4alpha/node.proto"; - -import "google/api/annotations.proto"; -import "google/protobuf/any.proto"; -import "google/protobuf/timestamp.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.status.v4alpha"; -option java_outer_classname = "CsdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Client Status Discovery Service (CSDS)] - -// CSDS is Client Status Discovery Service. It can be used to get the status of -// an xDS-compliant client from the management server's point of view. It can -// also be used to get the current xDS states directly from the client. 
-service ClientStatusDiscoveryService { - rpc StreamClientStatus(stream ClientStatusRequest) returns (stream ClientStatusResponse) { - } - - rpc FetchClientStatus(ClientStatusRequest) returns (ClientStatusResponse) { - option (google.api.http).post = "/v3/discovery:client_status"; - option (google.api.http).body = "*"; - } -} - -// Status of a config from a management server view. -enum ConfigStatus { - // Status info is not available/unknown. - UNKNOWN = 0; - - // Management server has sent the config to client and received ACK. - SYNCED = 1; - - // Config is not sent. - NOT_SENT = 2; - - // Management server has sent the config to client but hasn’t received - // ACK/NACK. - STALE = 3; - - // Management server has sent the config to client but received NACK. The - // attached config dump will be the latest config (the rejected one), since - // it is the persisted version in the management server. - ERROR = 4; -} - -// Config status from a client-side view. -enum ClientConfigStatus { - // Config status is not available/unknown. - CLIENT_UNKNOWN = 0; - - // Client requested the config but hasn't received any config from management - // server yet. - CLIENT_REQUESTED = 1; - - // Client received the config and replied with ACK. - CLIENT_ACKED = 2; - - // Client received the config and replied with NACK. Notably, the attached - // config dump is not the NACKed version, but the most recent accepted one. If - // no config is accepted yet, the attached config dump will be empty. - CLIENT_NACKED = 3; -} - -// Request for client status of clients identified by a list of NodeMatchers. -message ClientStatusRequest { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.status.v3.ClientStatusRequest"; - - // Management server can use these match criteria to identify clients. - // The match follows OR semantics. - repeated type.matcher.v4alpha.NodeMatcher node_matchers = 1; - - // The node making the csds request. 
- config.core.v4alpha.Node node = 2; -} - -// Detailed config (per xDS) with status. -// [#next-free-field: 8] -message PerXdsConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.status.v3.PerXdsConfig"; - - // Config status generated by management servers. Will not be present if the - // CSDS server is an xDS client. - ConfigStatus status = 1; - - // Client config status is populated by xDS clients. Will not be present if - // the CSDS server is an xDS server. No matter what the client config status - // is, xDS clients should always dump the most recent accepted xDS config. - // - // .. attention:: - // This field is deprecated. Use :ref:`ClientResourceStatus - // ` for per-resource - // config status instead. - ClientConfigStatus hidden_envoy_deprecated_client_status = 7 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - oneof per_xds_config { - admin.v4alpha.ListenersConfigDump listener_config = 2; - - admin.v4alpha.ClustersConfigDump cluster_config = 3; - - admin.v4alpha.RoutesConfigDump route_config = 4; - - admin.v4alpha.ScopedRoutesConfigDump scoped_route_config = 5; - - admin.v4alpha.EndpointsConfigDump endpoint_config = 6; - } -} - -// All xds configs for a particular client. -message ClientConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.status.v3.ClientConfig"; - - // GenericXdsConfig is used to specify the config status and the dump - // of any xDS resource identified by their type URL. It is the generalized - // version of the now deprecated ListenersConfigDump, ClustersConfigDump etc - // [#next-free-field: 10] - message GenericXdsConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.status.v3.ClientConfig.GenericXdsConfig"; - - // Type_url represents the fully qualified name of xDS resource type - // like envoy.v3.Cluster, envoy.v3.ClusterLoadAssignment etc. 
- string type_url = 1; - - // Name of the xDS resource - string name = 2; - - // This is the :ref:`version_info ` - // in the last processed xDS discovery response. If there are only - // static bootstrap listeners, this field will be "" - string version_info = 3; - - // The xDS resource config. Actual content depends on the type - google.protobuf.Any xds_config = 4; - - // Timestamp when the xDS resource was last updated - google.protobuf.Timestamp last_updated = 5; - - // Per xDS resource config status. It is generated by management servers. - // It will not be present if the CSDS server is an xDS client. - ConfigStatus config_status = 6; - - // Per xDS resource status from the view of a xDS client - admin.v4alpha.ClientResourceStatus client_status = 7; - - // Set if the last update failed, cleared after the next successful - // update. The *error_state* field contains the rejected version of - // this particular resource along with the reason and timestamp. For - // successfully updated or acknowledged resource, this field should - // be empty. - // [#not-implemented-hide:] - admin.v4alpha.UpdateFailureState error_state = 8; - - // Is static resource is true if it is specified in the config supplied - // through the file at the startup. - bool is_static_resource = 9; - } - - // Node for a particular client. - config.core.v4alpha.Node node = 1; - - // This field is deprecated in favor of generic_xds_configs which is - // much simpler and uniform in structure. 
- repeated PerXdsConfig hidden_envoy_deprecated_xds_config = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Represents generic xDS config and the exact config structure depends on - // the type URL (like Cluster if it is CDS) - repeated GenericXdsConfig generic_xds_configs = 3; -} - -message ClientStatusResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.status.v3.ClientStatusResponse"; - - // Client configs for the clients specified in the ClientStatusRequest. - repeated ClientConfig config = 1; -} diff --git a/generated_api_shadow/envoy/service/tap/v4alpha/BUILD b/generated_api_shadow/envoy/service/tap/v4alpha/BUILD deleted file mode 100644 index cb89a6907d9ab..0000000000000 --- a/generated_api_shadow/envoy/service/tap/v4alpha/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/data/tap/v3:pkg", - "//envoy/service/tap/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/tap/v4alpha/tap.proto b/generated_api_shadow/envoy/service/tap/v4alpha/tap.proto deleted file mode 100644 index 4ef38d1bae983..0000000000000 --- a/generated_api_shadow/envoy/service/tap/v4alpha/tap.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto3"; - -package envoy.service.tap.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; -import "envoy/data/tap/v3/wrapper.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.tap.v4alpha"; -option java_outer_classname = "TapProto"; -option java_multiple_files = true; -option java_generic_services = true; -option 
(udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Tap Sink Service] - -// [#not-implemented-hide:] A tap service to receive incoming taps. Envoy will call -// StreamTaps to deliver captured taps to the server -service TapSinkService { - // Envoy will connect and send StreamTapsRequest messages forever. It does not expect any - // response to be sent as nothing would be done in the case of failure. The server should - // disconnect if it expects Envoy to reconnect. - rpc StreamTaps(stream StreamTapsRequest) returns (StreamTapsResponse) { - } -} - -// [#not-implemented-hide:] Stream message for the Tap API. Envoy will open a stream to the server -// and stream taps without ever expecting a response. -message StreamTapsRequest { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.tap.v3.StreamTapsRequest"; - - message Identifier { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.tap.v3.StreamTapsRequest.Identifier"; - - // The node sending taps over the stream. - config.core.v4alpha.Node node = 1 [(validate.rules).message = {required: true}]; - - // The opaque identifier that was set in the :ref:`output config - // `. - string tap_id = 2; - } - - // Identifier data effectively is a structured metadata. As a performance optimization this will - // only be sent in the first message on the stream. - Identifier identifier = 1; - - // The trace id. this can be used to merge together a streaming trace. Note that the trace_id - // is not guaranteed to be spatially or temporally unique. - uint64 trace_id = 2; - - // The trace data. 
- data.tap.v3.TraceWrapper trace = 3; -} - -// [#not-implemented-hide:] -message StreamTapsResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.tap.v3.StreamTapsResponse"; -} diff --git a/generated_api_shadow/envoy/service/trace/v4alpha/BUILD b/generated_api_shadow/envoy/service/trace/v4alpha/BUILD deleted file mode 100644 index df379cbe9d5da..0000000000000 --- a/generated_api_shadow/envoy/service/trace/v4alpha/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/config/core/v4alpha:pkg", - "//envoy/service/trace/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@opencensus_proto//opencensus/proto/trace/v1:trace_proto", - ], -) diff --git a/generated_api_shadow/envoy/service/trace/v4alpha/trace_service.proto b/generated_api_shadow/envoy/service/trace/v4alpha/trace_service.proto deleted file mode 100644 index 4cfdbbe576df9..0000000000000 --- a/generated_api_shadow/envoy/service/trace/v4alpha/trace_service.proto +++ /dev/null @@ -1,55 +0,0 @@ -syntax = "proto3"; - -package envoy.service.trace.v4alpha; - -import "envoy/config/core/v4alpha/base.proto"; - -import "opencensus/proto/trace/v1/trace.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.trace.v4alpha"; -option java_outer_classname = "TraceServiceProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Trace service] - -// Service for streaming traces to server that consumes the trace data. 
It -// uses OpenCensus data model as a standard to represent trace information. -service TraceService { - // Envoy will connect and send StreamTracesMessage messages forever. It does - // not expect any response to be sent as nothing would be done in the case - // of failure. - rpc StreamTraces(stream StreamTracesMessage) returns (StreamTracesResponse) { - } -} - -message StreamTracesResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.trace.v3.StreamTracesResponse"; -} - -message StreamTracesMessage { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.trace.v3.StreamTracesMessage"; - - message Identifier { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.trace.v3.StreamTracesMessage.Identifier"; - - // The node sending the access log messages over the stream. - config.core.v4alpha.Node node = 1 [(validate.rules).message = {required: true}]; - } - - // Identifier data effectively is a structured metadata. - // As a performance optimization this will only be sent in the first message - // on the stream. - Identifier identifier = 1; - - // A list of Span entries - repeated opencensus.proto.trace.v1.Span spans = 2; -} diff --git a/generated_api_shadow/envoy/type/matcher/v3/metadata.proto b/generated_api_shadow/envoy/type/matcher/v3/metadata.proto index 68710dc718546..de19a2f34dbd1 100644 --- a/generated_api_shadow/envoy/type/matcher/v3/metadata.proto +++ b/generated_api_shadow/envoy/type/matcher/v3/metadata.proto @@ -101,4 +101,7 @@ message MetadataMatcher { // The MetadataMatcher is matched if the value retrieved by path is matched to this value. ValueMatcher value = 3 [(validate.rules).message = {required: true}]; + + // If true, the match result will be inverted. 
+ bool invert = 4; } diff --git a/generated_api_shadow/envoy/type/matcher/v4alpha/BUILD b/generated_api_shadow/envoy/type/matcher/v4alpha/BUILD deleted file mode 100644 index 37561e92662cf..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/v4alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/type/matcher/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/type/matcher/v4alpha/http_inputs.proto b/generated_api_shadow/envoy/type/matcher/v4alpha/http_inputs.proto deleted file mode 100644 index bd7758ad53fbf..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/v4alpha/http_inputs.proto +++ /dev/null @@ -1,70 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v4alpha; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; -option java_outer_classname = "HttpInputsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Common HTTP Inputs] - -// Match input indicates that matching should be done on a specific request header. -// The resulting input string will be all headers for the given key joined by a comma, -// e.g. if the request contains two 'foo' headers with value 'bar' and 'baz', the input -// string will be 'bar,baz'. -// [#comment:TODO(snowp): Link to unified matching docs.] -message HttpRequestHeaderMatchInput { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.v3.HttpRequestHeaderMatchInput"; - - // The request header to match on. 
- string header_name = 1 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; -} - -// Match input indicates that matching should be done on a specific request trailer. -// The resulting input string will be all headers for the given key joined by a comma, -// e.g. if the request contains two 'foo' headers with value 'bar' and 'baz', the input -// string will be 'bar,baz'. -// [#comment:TODO(snowp): Link to unified matching docs.] -message HttpRequestTrailerMatchInput { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.v3.HttpRequestTrailerMatchInput"; - - // The request trailer to match on. - string header_name = 1 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; -} - -// Match input indicating that matching should be done on a specific response header. -// The resulting input string will be all headers for the given key joined by a comma, -// e.g. if the response contains two 'foo' headers with value 'bar' and 'baz', the input -// string will be 'bar,baz'. -// [#comment:TODO(snowp): Link to unified matching docs.] -message HttpResponseHeaderMatchInput { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.v3.HttpResponseHeaderMatchInput"; - - // The response header to match on. - string header_name = 1 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; -} - -// Match input indicates that matching should be done on a specific response trailer. -// The resulting input string will be all headers for the given key joined by a comma, -// e.g. if the request contains two 'foo' headers with value 'bar' and 'baz', the input -// string will be 'bar,baz'. -// [#comment:TODO(snowp): Link to unified matching docs.] -message HttpResponseTrailerMatchInput { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.v3.HttpResponseTrailerMatchInput"; - - // The response trailer to match on. 
- string header_name = 1 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; -} diff --git a/generated_api_shadow/envoy/type/matcher/v4alpha/metadata.proto b/generated_api_shadow/envoy/type/matcher/v4alpha/metadata.proto deleted file mode 100644 index e61ba2754337b..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/v4alpha/metadata.proto +++ /dev/null @@ -1,105 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v4alpha; - -import "envoy/type/matcher/v4alpha/value.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; -option java_outer_classname = "MetadataProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Metadata matcher] - -// MetadataMatcher provides a general interface to check if a given value is matched in -// :ref:`Metadata `. It uses `filter` and `path` to retrieve the value -// from the Metadata and then check if it's matched to the specified value. -// -// For example, for the following Metadata: -// -// .. code-block:: yaml -// -// filter_metadata: -// envoy.filters.http.rbac: -// fields: -// a: -// struct_value: -// fields: -// b: -// struct_value: -// fields: -// c: -// string_value: pro -// t: -// list_value: -// values: -// - string_value: m -// - string_value: n -// -// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value "pro" -// from the Metadata which is matched to the specified prefix match. -// -// .. code-block:: yaml -// -// filter: envoy.filters.http.rbac -// path: -// - key: a -// - key: b -// - key: c -// value: -// string_match: -// prefix: pr -// -// The following MetadataMatcher is matched as the code will match one of the string values in the -// list at the path [a, t]. -// -// .. 
code-block:: yaml -// -// filter: envoy.filters.http.rbac -// path: -// - key: a -// - key: t -// value: -// list_match: -// one_of: -// string_match: -// exact: m -// -// An example use of MetadataMatcher is specifying additional metadata in envoy.filters.http.rbac to -// enforce access control based on dynamic metadata in a request. See :ref:`Permission -// ` and :ref:`Principal -// `. - -// [#next-major-version: MetadataMatcher should use StructMatcher] -message MetadataMatcher { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.v3.MetadataMatcher"; - - // Specifies the segment in a path to retrieve value from Metadata. - // Note: Currently it's not supported to retrieve a value from a list in Metadata. This means that - // if the segment key refers to a list, it has to be the last segment in a path. - message PathSegment { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.v3.MetadataMatcher.PathSegment"; - - oneof segment { - option (validate.required) = true; - - // If specified, use the key to retrieve the value in a Struct. - string key = 1 [(validate.rules).string = {min_len: 1}]; - } - } - - // The filter name to retrieve the Struct from the Metadata. - string filter = 1 [(validate.rules).string = {min_len: 1}]; - - // The path to retrieve the Value from the Struct. - repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; - - // The MetadataMatcher is matched if the value retrieved by path is matched to this value. 
- ValueMatcher value = 3 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/type/matcher/v4alpha/node.proto b/generated_api_shadow/envoy/type/matcher/v4alpha/node.proto deleted file mode 100644 index a74bf808f05ae..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/v4alpha/node.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v4alpha; - -import "envoy/type/matcher/v4alpha/string.proto"; -import "envoy/type/matcher/v4alpha/struct.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; -option java_outer_classname = "NodeProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Node matcher] - -// Specifies the way to match a Node. -// The match follows AND semantics. -message NodeMatcher { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.NodeMatcher"; - - // Specifies match criteria on the node id. - StringMatcher node_id = 1; - - // Specifies match criteria on the node metadata. 
- repeated StructMatcher node_metadatas = 2; -} diff --git a/generated_api_shadow/envoy/type/matcher/v4alpha/number.proto b/generated_api_shadow/envoy/type/matcher/v4alpha/number.proto deleted file mode 100644 index b168af19ab50c..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/v4alpha/number.proto +++ /dev/null @@ -1,33 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v4alpha; - -import "envoy/type/v3/range.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; -option java_outer_classname = "NumberProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Number matcher] - -// Specifies the way to match a double value. -message DoubleMatcher { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.v3.DoubleMatcher"; - - oneof match_pattern { - option (validate.required) = true; - - // If specified, the input double value must be in the range specified here. - // Note: The range is using half-open interval semantics [start, end). - v3.DoubleRange range = 1; - - // If specified, the input double value must be equal to the value specified here. 
- double exact = 2; - } -} diff --git a/generated_api_shadow/envoy/type/matcher/v4alpha/path.proto b/generated_api_shadow/envoy/type/matcher/v4alpha/path.proto deleted file mode 100644 index 9150939bf2eed..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/v4alpha/path.proto +++ /dev/null @@ -1,30 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v4alpha; - -import "envoy/type/matcher/v4alpha/string.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; -option java_outer_classname = "PathProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Path matcher] - -// Specifies the way to match a path on HTTP request. -message PathMatcher { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.PathMatcher"; - - oneof rule { - option (validate.required) = true; - - // The `path` must match the URL path portion of the :path header. The query and fragment - // string (if present) are removed in the URL path portion. - // For example, the path */data* will match the *:path* header */data#fragment?param=value*. 
- StringMatcher path = 1 [(validate.rules).message = {required: true}]; - } -} diff --git a/generated_api_shadow/envoy/type/matcher/v4alpha/regex.proto b/generated_api_shadow/envoy/type/matcher/v4alpha/regex.proto deleted file mode 100644 index 523889b9d3f7c..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/v4alpha/regex.proto +++ /dev/null @@ -1,89 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v4alpha; - -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; -option java_outer_classname = "RegexProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Regex matcher] - -// A regex matcher designed for safety when used with untrusted input. -message RegexMatcher { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.RegexMatcher"; - - // Google's `RE2 `_ regex engine. The regex string must adhere to - // the documented `syntax `_. The engine is designed - // to complete execution in linear time as well as limit the amount of memory used. - // - // Envoy supports program size checking via runtime. The runtime keys `re2.max_program_size.error_level` - // and `re2.max_program_size.warn_level` can be set to integers as the maximum program size or - // complexity that a compiled regex can have before an exception is thrown or a warning is - // logged, respectively. `re2.max_program_size.error_level` defaults to 100, and - // `re2.max_program_size.warn_level` has no default if unset (will not check/log a warning). 
- // - // Envoy emits two stats for tracking the program size of regexes: the histogram `re2.program_size`, - // which records the program size, and the counter `re2.exceeded_warn_level`, which is incremented - // each time the program size exceeds the warn level threshold. - message GoogleRE2 { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.v3.RegexMatcher.GoogleRE2"; - - // This field controls the RE2 "program size" which is a rough estimate of how complex a - // compiled regex is to evaluate. A regex that has a program size greater than the configured - // value will fail to compile. In this case, the configured max program size can be increased - // or the regex can be simplified. If not specified, the default is 100. - // - // This field is deprecated; regexp validation should be performed on the management server - // instead of being done by each individual client. - google.protobuf.UInt32Value hidden_envoy_deprecated_max_program_size = 1 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } - - oneof engine_type { - option (validate.required) = true; - - // Google's RE2 regex engine. - GoogleRE2 google_re2 = 1 [(validate.rules).message = {required: true}]; - } - - // The regex match string. The string must be supported by the configured engine. - string regex = 2 [(validate.rules).string = {min_len: 1}]; -} - -// Describes how to match a string and then produce a new string using a regular -// expression and a substitution string. -message RegexMatchAndSubstitute { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.v3.RegexMatchAndSubstitute"; - - // The regular expression used to find portions of a string (hereafter called - // the "subject string") that should be replaced. 
When a new string is - // produced during the substitution operation, the new string is initially - // the same as the subject string, but then all matches in the subject string - // are replaced by the substitution string. If replacing all matches isn't - // desired, regular expression anchors can be used to ensure a single match, - // so as to replace just one occurrence of a pattern. Capture groups can be - // used in the pattern to extract portions of the subject string, and then - // referenced in the substitution string. - RegexMatcher pattern = 1 [(validate.rules).message = {required: true}]; - - // The string that should be substituted into matching portions of the - // subject string during a substitution operation to produce a new string. - // Capture groups in the pattern can be referenced in the substitution - // string. Note, however, that the syntax for referring to capture groups is - // defined by the chosen regular expression engine. Google's `RE2 - // `_ regular expression engine uses a - // backslash followed by the capture group number to denote a numbered - // capture group. E.g., ``\1`` refers to capture group 1, and ``\2`` refers - // to capture group 2. 
- string substitution = 2; -} diff --git a/generated_api_shadow/envoy/type/matcher/v4alpha/string.proto b/generated_api_shadow/envoy/type/matcher/v4alpha/string.proto deleted file mode 100644 index f9fa48cd31956..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/v4alpha/string.proto +++ /dev/null @@ -1,78 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v4alpha; - -import "envoy/type/matcher/v4alpha/regex.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; -option java_outer_classname = "StringProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: String matcher] - -// Specifies the way to match a string. -// [#next-free-field: 8] -message StringMatcher { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.v3.StringMatcher"; - - reserved 4; - - reserved "regex"; - - oneof match_pattern { - option (validate.required) = true; - - // The input string must match exactly the string specified here. - // - // Examples: - // - // * *abc* only matches the value *abc*. - string exact = 1; - - // The input string must have the prefix specified here. - // Note: empty prefix is not allowed, please use regex instead. - // - // Examples: - // - // * *abc* matches the value *abc.xyz* - string prefix = 2 [(validate.rules).string = {min_len: 1}]; - - // The input string must have the suffix specified here. - // Note: empty prefix is not allowed, please use regex instead. - // - // Examples: - // - // * *abc* matches the value *xyz.abc* - string suffix = 3 [(validate.rules).string = {min_len: 1}]; - - // The input string must match the regular expression specified here. 
- RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}]; - - // The input string must have the substring specified here. - // Note: empty contains match is not allowed, please use regex instead. - // - // Examples: - // - // * *abc* matches the value *xyz.abc.def* - string contains = 7 [(validate.rules).string = {min_len: 1}]; - } - - // If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. This - // has no effect for the safe_regex match. - // For example, the matcher *data* will match both input string *Data* and *data* if set to true. - bool ignore_case = 6; -} - -// Specifies a list of ways to match a string. -message ListStringMatcher { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.v3.ListStringMatcher"; - - repeated StringMatcher patterns = 1 [(validate.rules).repeated = {min_items: 1}]; -} diff --git a/generated_api_shadow/envoy/type/matcher/v4alpha/struct.proto b/generated_api_shadow/envoy/type/matcher/v4alpha/struct.proto deleted file mode 100644 index 328ac555bd810..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/v4alpha/struct.proto +++ /dev/null @@ -1,91 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v4alpha; - -import "envoy/type/matcher/v4alpha/value.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; -option java_outer_classname = "StructProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Struct matcher] - -// StructMatcher provides a general interface to check if a given value is matched in -// google.protobuf.Struct. It uses `path` to retrieve the value -// from the struct and then check if it's matched to the specified value. 
-// -// For example, for the following Struct: -// -// .. code-block:: yaml -// -// fields: -// a: -// struct_value: -// fields: -// b: -// struct_value: -// fields: -// c: -// string_value: pro -// t: -// list_value: -// values: -// - string_value: m -// - string_value: n -// -// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value "pro" -// from the Metadata which is matched to the specified prefix match. -// -// .. code-block:: yaml -// -// path: -// - key: a -// - key: b -// - key: c -// value: -// string_match: -// prefix: pr -// -// The following StructMatcher is matched as the code will match one of the string values in the -// list at the path [a, t]. -// -// .. code-block:: yaml -// -// path: -// - key: a -// - key: t -// value: -// list_match: -// one_of: -// string_match: -// exact: m -// -// An example use of StructMatcher is to match metadata in envoy.v*.core.Node. -message StructMatcher { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.v3.StructMatcher"; - - // Specifies the segment in a path to retrieve value from Struct. - message PathSegment { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.v3.StructMatcher.PathSegment"; - - oneof segment { - option (validate.required) = true; - - // If specified, use the key to retrieve the value in a Struct. - string key = 1 [(validate.rules).string = {min_len: 1}]; - } - } - - // The path to retrieve the Value from the Struct. - repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; - - // The StructMatcher is matched if the value retrieved by path is matched to this value. 
- ValueMatcher value = 3 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/type/matcher/v4alpha/value.proto b/generated_api_shadow/envoy/type/matcher/v4alpha/value.proto deleted file mode 100644 index 6e509d4601099..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/v4alpha/value.proto +++ /dev/null @@ -1,71 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v4alpha; - -import "envoy/type/matcher/v4alpha/number.proto"; -import "envoy/type/matcher/v4alpha/string.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; -option java_outer_classname = "ValueProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// [#protodoc-title: Value matcher] - -// Specifies the way to match a ProtobufWkt::Value. Primitive values and ListValue are supported. -// StructValue is not supported and is always not matched. -// [#next-free-field: 7] -message ValueMatcher { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.ValueMatcher"; - - // NullMatch is an empty message to specify a null value. - message NullMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.v3.ValueMatcher.NullMatch"; - } - - // Specifies how to match a value. - oneof match_pattern { - option (validate.required) = true; - - // If specified, a match occurs if and only if the target value is a NullValue. - NullMatch null_match = 1; - - // If specified, a match occurs if and only if the target value is a double value and is - // matched to this field. - DoubleMatcher double_match = 2; - - // If specified, a match occurs if and only if the target value is a string value and is - // matched to this field. 
- StringMatcher string_match = 3; - - // If specified, a match occurs if and only if the target value is a bool value and is equal - // to this field. - bool bool_match = 4; - - // If specified, value match will be performed based on whether the path is referring to a - // valid primitive value in the metadata. If the path is referring to a non-primitive value, - // the result is always not matched. - bool present_match = 5; - - // If specified, a match occurs if and only if the target value is a list value and - // is matched to this field. - ListMatcher list_match = 6; - } -} - -// Specifies the way to match a list value. -message ListMatcher { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.ListMatcher"; - - oneof match_pattern { - option (validate.required) = true; - - // If specified, at least one of the values in the list must match the value specified. - ValueMatcher one_of = 1; - } -} diff --git a/pytest.ini b/pytest.ini index 14991b78a5ed3..be97f8e60e6a7 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,4 +1,4 @@ [pytest] -addopts = -raq --ignore=tools/testing/external/*,__init__.py,testing/conf --color=yes --cov-append -p tools.testing.plugin --cov-config=.coveragerc -vv tools +addopts = -raq --ignore=tools/testing/external/*,__init__.py,testing/conf --color=yes --cov-append -p tools.testing.plugin --cov-config=.coveragerc -Werror -vv tools testpaths = tests diff --git a/source/common/api/BUILD b/source/common/api/BUILD index 60412c4513432..950ea63a165cc 100644 --- a/source/common/api/BUILD +++ b/source/common/api/BUILD @@ -18,6 +18,7 @@ envoy_cc_library( "//source/common/common:thread_lib", "//source/common/event:dispatcher_lib", "//source/common/network:socket_lib", + "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", ], ) diff --git a/source/common/api/api_impl.cc b/source/common/api/api_impl.cc index 73de8f4a320c2..485fd5f50d8d2 100644 --- a/source/common/api/api_impl.cc +++ b/source/common/api/api_impl.cc @@ -3,6 
+3,8 @@ #include #include +#include "envoy/config/bootstrap/v3/bootstrap.pb.h" + #include "source/common/common/thread.h" #include "source/common/event/dispatcher_impl.h" @@ -11,10 +13,12 @@ namespace Api { Impl::Impl(Thread::ThreadFactory& thread_factory, Stats::Store& store, Event::TimeSystem& time_system, Filesystem::Instance& file_system, - Random::RandomGenerator& random_generator, const ProcessContextOptRef& process_context, + Random::RandomGenerator& random_generator, + const envoy::config::bootstrap::v3::Bootstrap& bootstrap, + const ProcessContextOptRef& process_context, Buffer::WatermarkFactorySharedPtr watermark_factory) : thread_factory_(thread_factory), store_(store), time_system_(time_system), - file_system_(file_system), random_generator_(random_generator), + file_system_(file_system), random_generator_(random_generator), bootstrap_(bootstrap), process_context_(process_context), watermark_factory_(std::move(watermark_factory)) {} Event::DispatcherPtr Impl::allocateDispatcher(const std::string& name) { diff --git a/source/common/api/api_impl.h b/source/common/api/api_impl.h index 0bec3b866562d..9a9e1e3fad096 100644 --- a/source/common/api/api_impl.h +++ b/source/common/api/api_impl.h @@ -4,6 +4,7 @@ #include #include "envoy/api/api.h" +#include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/event/timer.h" #include "envoy/filesystem/filesystem.h" #include "envoy/network/socket.h" @@ -19,6 +20,7 @@ class Impl : public Api { public: Impl(Thread::ThreadFactory& thread_factory, Stats::Store& store, Event::TimeSystem& time_system, Filesystem::Instance& file_system, Random::RandomGenerator& random_generator, + const envoy::config::bootstrap::v3::Bootstrap& bootstrap, const ProcessContextOptRef& process_context = absl::nullopt, Buffer::WatermarkFactorySharedPtr watermark_factory = nullptr); @@ -34,6 +36,7 @@ class Impl : public Api { TimeSource& timeSource() override { return time_system_; } Stats::Scope& rootScope() override { return store_; } 
Random::RandomGenerator& randomGenerator() override { return random_generator_; } + const envoy::config::bootstrap::v3::Bootstrap& bootstrap() const override { return bootstrap_; } ProcessContextOptRef processContext() override { return process_context_; } private: @@ -42,6 +45,7 @@ class Impl : public Api { Event::TimeSystem& time_system_; Filesystem::Instance& file_system_; Random::RandomGenerator& random_generator_; + const envoy::config::bootstrap::v3::Bootstrap& bootstrap_; ProcessContextOptRef process_context_; const Buffer::WatermarkFactorySharedPtr watermark_factory_; }; diff --git a/source/common/buffer/BUILD b/source/common/buffer/BUILD index 832d72c913128..c5ea9bd23730d 100644 --- a/source/common/buffer/BUILD +++ b/source/common/buffer/BUILD @@ -13,9 +13,11 @@ envoy_cc_library( srcs = ["watermark_buffer.cc"], hdrs = ["watermark_buffer.h"], deps = [ + "//envoy/http:stream_reset_handler_interface", "//source/common/buffer:buffer_lib", "//source/common/common:assert_lib", "//source/common/runtime:runtime_features_lib", + "@envoy_api//envoy/config/overload/v3:pkg_cc_proto", ], ) diff --git a/source/common/buffer/buffer_impl.h b/source/common/buffer/buffer_impl.h index 8f7977f9c269c..12152a9273021 100644 --- a/source/common/buffer/buffer_impl.h +++ b/source/common/buffer/buffer_impl.h @@ -7,6 +7,7 @@ #include #include "envoy/buffer/buffer.h" +#include "envoy/http/stream_reset_handler.h" #include "source/common/common/assert.h" #include "source/common/common/non_copyable.h" @@ -842,39 +843,5 @@ class OwnedBufferFragmentImpl final : public BufferFragment, public InlineStorag using OwnedBufferFragmentImplPtr = std::unique_ptr; -/** - * A BufferMemoryAccountImpl tracks allocated bytes across associated buffers and - * slices that originate from those buffers, or are untagged and pass through an - * associated buffer. 
- */ -class BufferMemoryAccountImpl : public BufferMemoryAccount { -public: - BufferMemoryAccountImpl() = default; - ~BufferMemoryAccountImpl() override { ASSERT(buffer_memory_allocated_ == 0); } - - // Make not copyable - BufferMemoryAccountImpl(const BufferMemoryAccountImpl&) = delete; - BufferMemoryAccountImpl& operator=(const BufferMemoryAccountImpl&) = delete; - - // Make not movable. - BufferMemoryAccountImpl(BufferMemoryAccountImpl&&) = delete; - BufferMemoryAccountImpl& operator=(BufferMemoryAccountImpl&&) = delete; - - uint64_t balance() const { return buffer_memory_allocated_; } - void charge(uint64_t amount) override { - // Check overflow - ASSERT(std::numeric_limits::max() - buffer_memory_allocated_ >= amount); - buffer_memory_allocated_ += amount; - } - - void credit(uint64_t amount) override { - ASSERT(buffer_memory_allocated_ >= amount); - buffer_memory_allocated_ -= amount; - } - -private: - uint64_t buffer_memory_allocated_ = 0; -}; - } // namespace Buffer } // namespace Envoy diff --git a/source/common/buffer/watermark_buffer.cc b/source/common/buffer/watermark_buffer.cc index 781321f99dc44..734fed3fb0443 100644 --- a/source/common/buffer/watermark_buffer.cc +++ b/source/common/buffer/watermark_buffer.cc @@ -1,10 +1,22 @@ #include "source/common/buffer/watermark_buffer.h" +#include "watermark_buffer.h" + +#include +#include + +#include "envoy/buffer/buffer.h" #include "source/common/common/assert.h" +#include "source/common/common/logger.h" #include "source/common/runtime/runtime_features.h" namespace Envoy { namespace Buffer { +namespace { +// Effectively disables tracking as this should zero out all reasonable account +// balances when shifted by this amount. 
+constexpr uint32_t kEffectivelyDisableTrackingBitshift = 63; +} // end namespace void WatermarkBuffer::add(const void* data, uint64_t size) { OwnedImpl::add(data, size); @@ -136,5 +148,145 @@ void WatermarkBuffer::checkHighAndOverflowWatermarks() { } } +BufferMemoryAccountSharedPtr +WatermarkBufferFactory::createAccount(Http::StreamResetHandler& reset_handler) { + if (bitshift_ == kEffectivelyDisableTrackingBitshift) { + return nullptr; // No tracking + } + return BufferMemoryAccountImpl::createAccount(this, reset_handler); +} + +void WatermarkBufferFactory::updateAccountClass(const BufferMemoryAccountSharedPtr& account, + absl::optional current_class, + absl::optional new_class) { + ASSERT(current_class != new_class, "Expected the current_class and new_class to be different"); + + if (!current_class.has_value()) { + // Start tracking + ASSERT(new_class.has_value()); + ASSERT(!size_class_account_sets_[new_class.value()].contains(account)); + size_class_account_sets_[new_class.value()].insert(account); + } else if (!new_class.has_value()) { + // No longer track + ASSERT(current_class.has_value()); + ASSERT(size_class_account_sets_[current_class.value()].contains(account)); + size_class_account_sets_[current_class.value()].erase(account); + } else { + // Moving between buckets + ASSERT(size_class_account_sets_[current_class.value()].contains(account)); + ASSERT(!size_class_account_sets_[new_class.value()].contains(account)); + size_class_account_sets_[new_class.value()].insert( + std::move(size_class_account_sets_[current_class.value()].extract(account).value())); + } +} + +void WatermarkBufferFactory::unregisterAccount(const BufferMemoryAccountSharedPtr& account, + absl::optional current_class) { + if (current_class.has_value()) { + ASSERT(size_class_account_sets_[current_class.value()].contains(account)); + size_class_account_sets_[current_class.value()].erase(account); + } +} + +uint64_t WatermarkBufferFactory::resetAccountsGivenPressure(float pressure) { + 
ASSERT(pressure >= 0.0 && pressure <= 1.0, "Provided pressure is out of range [0, 1]."); + + // Compute buckets to clear + const uint32_t buckets_to_clear = std::min( + std::floor(pressure * BufferMemoryAccountImpl::NUM_MEMORY_CLASSES_) + 1, 8); + uint32_t bucket_idx = BufferMemoryAccountImpl::NUM_MEMORY_CLASSES_ - buckets_to_clear; + + ENVOY_LOG_MISC(warn, "resetting streams in buckets >= {}", bucket_idx); + uint64_t num_streams_reset = 0; + // TODO(kbaichoo): Add a limit to the number of streams we reset + // per-invocation of this function. + // Clear buckets + while (bucket_idx < BufferMemoryAccountImpl::NUM_MEMORY_CLASSES_) { + ENVOY_LOG_MISC(warn, "resetting {} streams in bucket {}.", + size_class_account_sets_[bucket_idx].size(), bucket_idx); + + auto it = size_class_account_sets_[bucket_idx].begin(); + while (it != size_class_account_sets_[bucket_idx].end()) { + auto next = std::next(it); + // This will trigger an erase, which avoids rehashing and invalidates the + // iterator *it*. *next* is still valid. + (*it)->resetDownstream(); + it = next; + ++num_streams_reset; + } + + ++bucket_idx; + } + + return num_streams_reset; +} + +WatermarkBufferFactory::WatermarkBufferFactory( + const envoy::config::overload::v3::BufferFactoryConfig& config) + : bitshift_(config.minimum_account_to_track_power_of_two() + ? config.minimum_account_to_track_power_of_two() - 1 + : kEffectivelyDisableTrackingBitshift) {} + +WatermarkBufferFactory::~WatermarkBufferFactory() { + for (auto& account_set : size_class_account_sets_) { + ASSERT(account_set.empty(), + "Expected all Accounts to have unregistered from the Watermark Factory."); + } +} + +BufferMemoryAccountSharedPtr +BufferMemoryAccountImpl::createAccount(WatermarkBufferFactory* factory, + Http::StreamResetHandler& reset_handler) { + // We use shared_ptr ctor directly rather than make shared since the + // constructor being invoked is private as we want users to use this static + // method to createAccounts. 
+ auto account = + std::shared_ptr(new BufferMemoryAccountImpl(factory, reset_handler)); + // Set shared_this_ in the account. + static_cast(account.get())->shared_this_ = account; + return account; +} + +absl::optional BufferMemoryAccountImpl::balanceToClassIndex() { + const uint64_t shifted_balance = buffer_memory_allocated_ >> factory_->bitshift(); + + if (shifted_balance == 0) { + return {}; // Not worth tracking anything < configured minimum threshold + } + + const int class_idx = absl::bit_width(shifted_balance) - 1; + return std::min(class_idx, NUM_MEMORY_CLASSES_ - 1); +} + +void BufferMemoryAccountImpl::updateAccountClass() { + auto new_class = balanceToClassIndex(); + if (shared_this_ && new_class != current_bucket_idx_) { + factory_->updateAccountClass(shared_this_, current_bucket_idx_, new_class); + current_bucket_idx_ = new_class; + } +} + +void BufferMemoryAccountImpl::credit(uint64_t amount) { + ASSERT(buffer_memory_allocated_ >= amount); + buffer_memory_allocated_ -= amount; + updateAccountClass(); +} + +void BufferMemoryAccountImpl::charge(uint64_t amount) { + // Check overflow + ASSERT(std::numeric_limits::max() - buffer_memory_allocated_ >= amount); + buffer_memory_allocated_ += amount; + updateAccountClass(); +} + +void BufferMemoryAccountImpl::clearDownstream() { + if (reset_handler_.has_value()) { + reset_handler_.reset(); + factory_->unregisterAccount(shared_this_, current_bucket_idx_); + current_bucket_idx_.reset(); + shared_this_ = nullptr; + } +} + } // namespace Buffer } // namespace Envoy diff --git a/source/common/buffer/watermark_buffer.h b/source/common/buffer/watermark_buffer.h index 6afc4d8602323..95cd369c82c84 100644 --- a/source/common/buffer/watermark_buffer.h +++ b/source/common/buffer/watermark_buffer.h @@ -3,6 +3,10 @@ #include #include +#include "envoy/buffer/buffer.h" +#include "envoy/common/optref.h" +#include "envoy/config/overload/v3/overload.pb.h" + #include "source/common/buffer/buffer_impl.h" namespace Envoy { @@ 
-72,15 +76,150 @@ class WatermarkBuffer : public OwnedImpl { using WatermarkBufferPtr = std::unique_ptr; +class WatermarkBufferFactory; + +/** + * A BufferMemoryAccountImpl tracks allocated bytes across associated buffers and + * slices that originate from those buffers, or are untagged and pass through an + * associated buffer. + * + * This BufferMemoryAccount is produced by the *WatermarkBufferFactory*. + */ +class BufferMemoryAccountImpl : public BufferMemoryAccount { +public: + // Used to create the account, and complete wiring with the factory + // and shared_this_. + static BufferMemoryAccountSharedPtr createAccount(WatermarkBufferFactory* factory, + Http::StreamResetHandler& reset_handler); + ~BufferMemoryAccountImpl() override { + // The buffer_memory_allocated_ should always be zero on destruction, even + // if we triggered a reset of the downstream. This is because the destructor + // will only trigger when no entities have a pointer to the account, meaning + // any slices which charge and credit the account should have credited the + // account when they were deleted, maintaining this invariant. + ASSERT(buffer_memory_allocated_ == 0); + ASSERT(!reset_handler_.has_value()); + } + + // Make not copyable + BufferMemoryAccountImpl(const BufferMemoryAccountImpl&) = delete; + BufferMemoryAccountImpl& operator=(const BufferMemoryAccountImpl&) = delete; + + // Make not movable. + BufferMemoryAccountImpl(BufferMemoryAccountImpl&&) = delete; + BufferMemoryAccountImpl& operator=(BufferMemoryAccountImpl&&) = delete; + + uint64_t balance() const { return buffer_memory_allocated_; } + void charge(uint64_t amount) override; + void credit(uint64_t amount) override; + + // Clear the associated downstream, preparing the account to be destroyed. + // This is idempotent. 
+ void clearDownstream() override; + + void resetDownstream() override { + if (reset_handler_.has_value()) { + reset_handler_->resetStream(Http::StreamResetReason::OverloadManager); + } + } + + // The number of memory classes the Account expects to exists. See + // *WatermarkBufferFactory* for details on the memory classes. + static constexpr uint32_t NUM_MEMORY_CLASSES_ = 8; + +private: + BufferMemoryAccountImpl(WatermarkBufferFactory* factory, Http::StreamResetHandler& reset_handler) + : factory_(factory), reset_handler_(reset_handler) {} + + // Returns the class index based off of the buffer_memory_allocated_ + // This can differ with current_bucket_idx_ if buffer_memory_allocated_ was + // just modified. + // Returned class index, if present, is in the range [0, NUM_MEMORY_CLASSES_). + absl::optional balanceToClassIndex(); + void updateAccountClass(); + + uint64_t buffer_memory_allocated_ = 0; + // Current bucket index where the account is being tracked in. + absl::optional current_bucket_idx_{}; + + WatermarkBufferFactory* factory_ = nullptr; + + OptRef reset_handler_; + // Keep a copy of the shared_ptr pointing to this account. We opted to go this + // route rather than enable_shared_from_this to avoid wasteful atomic + // operations e.g. when updating the tracking of the account. + // This is set through the createAccount static method which is the only way to + // instantiate an instance of this class. This should is cleared when + // unregistering from the factory. + BufferMemoryAccountSharedPtr shared_this_ = nullptr; +}; + +/** + * The WatermarkBufferFactory creates *WatermarkBuffer*s and + * *BufferMemoryAccountImpl* that can be used to bind to the created buffers + * from a given downstream (and corresponding upstream, if one exists). The + * accounts can then be used to reset the underlying stream. + * + * Any account produced by this factory might be tracked by the factory using the + * following scheme: + * + * 1) Is the account balance >= 1MB? 
If not don't track. + * 2) For all accounts above the minimum threshold for tracking, put the account + * into one of the *BufferMemoryAccountImpl::NUM_MEMORY_CLASSES_* buckets. + * + * We keep buckets containing accounts within a "memory class", which are + * power of two buckets. For example, with a minimum threshold of 1MB, our + * first bucket contains [1MB, 2MB) accounts, the second bucket contains + * [2MB, 4MB), and so forth for + * *BufferMemoryAccountImpl::NUM_MEMORY_CLASSES_* buckets. These buckets + * allow us to coarsely track accounts, and if overloaded we can easily + * target more expensive streams. + * + * As the account balance changes, the account informs the Watermark Factory + * if the bucket for that account has changed. See + * *BufferMemoryAccountImpl::balanceToClassIndex()* for details on the memory + * class for a given account balance. + * + * TODO(kbaichoo): Update this documentation when we make the minimum account + * threshold configurable. + * + */ class WatermarkBufferFactory : public WatermarkFactory { public: + WatermarkBufferFactory(const envoy::config::overload::v3::BufferFactoryConfig& config); + // Buffer::WatermarkFactory + ~WatermarkBufferFactory() override; InstancePtr createBuffer(std::function below_low_watermark, std::function above_high_watermark, std::function above_overflow_watermark) override { return std::make_unique(below_low_watermark, above_high_watermark, above_overflow_watermark); } + + BufferMemoryAccountSharedPtr createAccount(Http::StreamResetHandler& reset_handler) override; + uint64_t resetAccountsGivenPressure(float pressure) override; + + // Called by BufferMemoryAccountImpls created by the factory on account class + // updated. + void updateAccountClass(const BufferMemoryAccountSharedPtr& account, + absl::optional current_class, + absl::optional new_class); + + uint32_t bitshift() const { return bitshift_; } + + // Unregister a buffer memory account. 
+ virtual void unregisterAccount(const BufferMemoryAccountSharedPtr& account, + absl::optional current_class); + +protected: + // Enable subclasses to inspect the mapping. + using MemoryClassesToAccountsSet = std::array, + BufferMemoryAccountImpl::NUM_MEMORY_CLASSES_>; + MemoryClassesToAccountsSet size_class_account_sets_; + // How much to bit shift right balances to test whether the account should be + // tracked in *size_class_account_sets_*. + const uint32_t bitshift_; }; } // namespace Buffer diff --git a/source/common/common/BUILD b/source/common/common/BUILD index 5614a214ba5a8..3ca8dfa105c9d 100644 --- a/source/common/common/BUILD +++ b/source/common/common/BUILD @@ -137,6 +137,17 @@ envoy_cc_library( hdrs = ["interval_value.h"], ) +envoy_cc_library( + name = "key_value_store_lib", + srcs = ["key_value_store_base.cc"], + hdrs = ["key_value_store_base.h"], + deps = [ + "//envoy/common:key_value_store_interface", + "//envoy/event:dispatcher_interface", + "//envoy/filesystem:filesystem_interface", + ], +) + envoy_cc_library( name = "linked_object", hdrs = ["linked_object.h"], @@ -301,6 +312,7 @@ envoy_cc_library( "//envoy/common:regex_interface", "//source/common/protobuf:utility_lib", "//source/common/stats:symbol_table_lib", + "@com_github_cncf_udpa//xds/type/matcher/v3:pkg_cc_proto", "@com_googlesource_code_re2//:re2", "@envoy_api//envoy/type/matcher/v3:pkg_cc_proto", ], diff --git a/source/common/common/key_value_store_base.cc b/source/common/common/key_value_store_base.cc new file mode 100644 index 0000000000000..e80ab8570268a --- /dev/null +++ b/source/common/common/key_value_store_base.cc @@ -0,0 +1,84 @@ +#include "source/common/common/key_value_store_base.h" + +namespace Envoy { +namespace { + +// Removes a length prefixed token from |contents| and returns the token, +// or returns absl::nullopt on failure. 
+absl::optional<absl::string_view> getToken(absl::string_view& contents, std::string& error) {
+  const auto it = contents.find("\n");
+  if (it == contents.npos) {
+    error = "Bad file: no newline";
+    return {};
+  }
+  uint64_t length;
+  if (!absl::SimpleAtoi(contents.substr(0, it), &length)) {
+    error = "Bad file: no length";
+    return {};
+  }
+  contents.remove_prefix(it + 1);
+  if (contents.size() < length) {
+    error = "Bad file: insufficient contents";
+    return {};
+  }
+  absl::string_view token = contents.substr(0, length);
+  contents.remove_prefix(length);
+  return token;
+}
+
+} // namespace
+
+KeyValueStoreBase::KeyValueStoreBase(Event::Dispatcher& dispatcher,
+                                     std::chrono::seconds flush_interval)
+    : flush_timer_(dispatcher.createTimer([this]() { flush(); })) {
+  flush_timer_->enableTimer(flush_interval);
+}
+
+// Assuming |contents| is in the format
+// [length]\n[key][length]\n[value]
+// parses contents into the provided store.
+// This is best effort, and will return false on failure without clearing
+// partially parsed data.
+bool KeyValueStoreBase::parseContents(absl::string_view contents, + absl::flat_hash_map& store) const { + std::string error; + while (!contents.empty()) { + absl::optional key = getToken(contents, error); + absl::optional value; + if (key.has_value()) { + value = getToken(contents, error); + } + if (!key.has_value() || !value.has_value()) { + ENVOY_LOG(warn, error); + return false; + } + store.emplace(std::string(key.value()), std::string(value.value())); + } + return true; +} + +void KeyValueStoreBase::addOrUpdate(absl::string_view key, absl::string_view value) { + store_.erase(key); + store_.emplace(key, value); +} + +void KeyValueStoreBase::remove(absl::string_view key) { store_.erase(key); } + +absl::optional KeyValueStoreBase::get(absl::string_view key) { + auto it = store_.find(key); + if (it == store_.end()) { + return {}; + } + return it->second; +} + +void KeyValueStoreBase::iterate(ConstIterateCb cb) const { + for (const auto& [key, value] : store_) { + Iterate ret = cb(key, value); + if (ret == Iterate::Break) { + return; + } + } +} + +} // namespace Envoy diff --git a/source/common/common/key_value_store_base.h b/source/common/common/key_value_store_base.h new file mode 100644 index 0000000000000..c445e9f47bdde --- /dev/null +++ b/source/common/common/key_value_store_base.h @@ -0,0 +1,43 @@ +#pragma once + +#include "envoy/common/key_value_store.h" +#include "envoy/event/dispatcher.h" +#include "envoy/filesystem/filesystem.h" + +#include "source/common/common/logger.h" + +#include "absl/container/flat_hash_map.h" + +// TODO(alyssawilk) move to a common extension dir. +namespace Envoy { + +// This is the base implementation of the KeyValueStore. It handles the various +// functions other than flush(), which will be implemented by subclasses. +// +// Note this implementation HAS UNBOUNDED SIZE. +// It is assumed the callers manage the number of entries. Use with care. 
+class KeyValueStoreBase : public KeyValueStore, + public Logger::Loggable { +public: + // Sets up flush() for the configured interval. + KeyValueStoreBase(Event::Dispatcher& dispatcher, std::chrono::seconds flush_interval); + + // If |contents| is in the form of + // [length]\n[key][length]\n[value] + // parses key value pairs from |contents| into the store provided. + // Returns true on success and false on failure. + bool parseContents(absl::string_view contents, + absl::flat_hash_map& store) const; + std::string error; + // KeyValueStore + void addOrUpdate(absl::string_view key, absl::string_view value) override; + void remove(absl::string_view key) override; + absl::optional get(absl::string_view key) override; + void iterate(ConstIterateCb cb) const override; + +protected: + const Event::TimerPtr flush_timer_; + absl::flat_hash_map store_; +}; + +} // namespace Envoy diff --git a/source/common/common/logger.cc b/source/common/common/logger.cc index fa62bc23f2de2..bc4859fe8ee1b 100644 --- a/source/common/common/logger.cc +++ b/source/common/common/logger.cc @@ -22,6 +22,8 @@ StandardLogger::StandardLogger(const std::string& name) : Logger(std::make_shared(name, Registry::getSink())) {} SinkDelegate::SinkDelegate(DelegatingLogSinkSharedPtr log_sink) : log_sink_(log_sink) {} +void SinkDelegate::logWithStableName(absl::string_view, absl::string_view, absl::string_view, + absl::string_view) {} SinkDelegate::~SinkDelegate() { // The previous delegate should have never been set or should have been reset by now via diff --git a/source/common/common/logger.h b/source/common/common/logger.h index 03edb6b569303..fdae426b8acfa 100644 --- a/source/common/common/logger.h +++ b/source/common/common/logger.h @@ -37,6 +37,7 @@ namespace Logger { FUNCTION(connection) \ FUNCTION(conn_handler) \ FUNCTION(decompression) \ + FUNCTION(dns) \ FUNCTION(dubbo) \ FUNCTION(envoy_bug) \ FUNCTION(ext_authz) \ @@ -54,6 +55,7 @@ namespace Logger { FUNCTION(io) \ FUNCTION(jwt) \ 
FUNCTION(kafka) \ + FUNCTION(key_value_store) \ FUNCTION(lua) \ FUNCTION(main) \ FUNCTION(matcher) \ @@ -106,6 +108,8 @@ class SinkDelegate : NonCopyable { virtual ~SinkDelegate(); virtual void log(absl::string_view msg) PURE; + virtual void logWithStableName(absl::string_view stable_name, absl::string_view level, + absl::string_view component, absl::string_view msg); virtual void flush() PURE; protected: @@ -156,6 +160,12 @@ class DelegatingLogSink : public spdlog::sinks::sink { void setLock(Thread::BasicLockable& lock) { stderr_sink_->setLock(lock); } void clearLock() { stderr_sink_->clearLock(); } + template + void logWithStableName(absl::string_view stable_name, absl::string_view level, + absl::string_view component, Args... msg) { + absl::ReaderMutexLock sink_lock(&sink_mutex_); + sink_->logWithStableName(stable_name, level, component, fmt::format(msg...)); + } // spdlog::sinks::sink void log(const spdlog::details::log_msg& msg) override; void flush() override { @@ -444,6 +454,30 @@ class EscapeMessageJsonString : public spdlog::custom_flag_formatter { */ #define ENVOY_LOG(LEVEL, ...) ENVOY_LOG_TO_LOGGER(ENVOY_LOGGER(), LEVEL, ##__VA_ARGS__) +/** + * Log with a stable event name. This allows emitting a log line with a stable name in addition to + * the standard log line. The stable log line is passed to custom sinks that may want to intercept + * these log messages. + * + * By default these named logs are not handled, but a custom log sink may intercept them by + * implementing the logWithStableName function. + */ +#define ENVOY_LOG_EVENT(LEVEL, EVENT_NAME, ...) \ + ENVOY_LOG_EVENT_TO_LOGGER(ENVOY_LOGGER(), LEVEL, EVENT_NAME, ##__VA_ARGS__) + +#define ENVOY_LOG_EVENT_TO_LOGGER(LOGGER, LEVEL, EVENT_NAME, ...) 
\ + do { \ + ENVOY_LOG_TO_LOGGER(LOGGER, LEVEL, ##__VA_ARGS__); \ + if (ENVOY_LOG_COMP_LEVEL(LOGGER, LEVEL)) { \ + ::Envoy::Logger::Registry::getSink()->logWithStableName(EVENT_NAME, #LEVEL, (LOGGER).name(), \ + ##__VA_ARGS__); \ + } \ + } while (0) + +#define ENVOY_CONN_LOG_EVENT(LEVEL, EVENT_NAME, FORMAT, CONNECTION, ...) \ + ENVOY_LOG_EVENT_TO_LOGGER(ENVOY_LOGGER(), LEVEL, EVENT_NAME, "[C{}] " FORMAT, (CONNECTION).id(), \ + ##__VA_ARGS__); + #define ENVOY_LOG_FIRST_N_TO_LOGGER(LOGGER, LEVEL, N, ...) \ do { \ if (ENVOY_LOG_COMP_LEVEL(LOGGER, LEVEL)) { \ diff --git a/source/common/common/matchers.cc b/source/common/common/matchers.cc index ec93e4bbf11c1..f34a482ca7a35 100644 --- a/source/common/common/matchers.cc +++ b/source/common/common/matchers.cc @@ -23,7 +23,8 @@ ValueMatcherConstSharedPtr ValueMatcher::create(const envoy::type::matcher::v3:: case envoy::type::matcher::v3::ValueMatcher::MatchPatternCase::kDoubleMatch: return std::make_shared(v.double_match()); case envoy::type::matcher::v3::ValueMatcher::MatchPatternCase::kStringMatch: - return std::make_shared(v.string_match()); + return std::make_shared>>( + v.string_match()); case envoy::type::matcher::v3::ValueMatcher::MatchPatternCase::kBoolMatch: return std::make_shared(v.bool_match()); case envoy::type::matcher::v3::ValueMatcher::MatchPatternCase::kPresentMatch: @@ -63,65 +64,6 @@ bool DoubleMatcher::match(const ProtobufWkt::Value& value) const { }; } -StringMatcherImpl::StringMatcherImpl(const envoy::type::matcher::v3::StringMatcher& matcher) - : matcher_(matcher) { - if (matcher.match_pattern_case() == - envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kSafeRegex) { - if (matcher.ignore_case()) { - throw EnvoyException("ignore_case has no effect for safe_regex."); - } - regex_ = Regex::Utility::parseRegex(matcher_.safe_regex()); - } else if (matcher.match_pattern_case() == - envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kContains) { - if (matcher_.ignore_case()) { - // Cache 
the lowercase conversion of the Contains matcher for future use - lowercase_contains_match_ = absl::AsciiStrToLower(matcher_.contains()); - } - } -} - -bool StringMatcherImpl::match(const ProtobufWkt::Value& value) const { - if (value.kind_case() != ProtobufWkt::Value::kStringValue) { - return false; - } - - return match(value.string_value()); -} - -bool StringMatcherImpl::match(const absl::string_view value) const { - switch (matcher_.match_pattern_case()) { - case envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kExact: - return matcher_.ignore_case() ? absl::EqualsIgnoreCase(value, matcher_.exact()) - : value == matcher_.exact(); - case envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kPrefix: - return matcher_.ignore_case() ? absl::StartsWithIgnoreCase(value, matcher_.prefix()) - : absl::StartsWith(value, matcher_.prefix()); - case envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kSuffix: - return matcher_.ignore_case() ? absl::EndsWithIgnoreCase(value, matcher_.suffix()) - : absl::EndsWith(value, matcher_.suffix()); - case envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kContains: - return matcher_.ignore_case() - ? 
absl::StrContains(absl::AsciiStrToLower(value), lowercase_contains_match_) - : absl::StrContains(value, matcher_.contains()); - case envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kHiddenEnvoyDeprecatedRegex: - FALLTHRU; - case envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kSafeRegex: - return regex_->match(value); - default: - NOT_REACHED_GCOVR_EXCL_LINE; - } -} - -bool StringMatcherImpl::getCaseSensitivePrefixMatch(std::string& prefix) const { - if (matcher_.match_pattern_case() == - envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kPrefix && - !matcher_.ignore_case()) { - prefix = matcher_.prefix(); - return true; - } - return false; -} - ListMatcher::ListMatcher(const envoy::type::matcher::v3::ListMatcher& matcher) : matcher_(matcher) { ASSERT(matcher_.match_pattern_case() == envoy::type::matcher::v3::ListMatcher::MatchPatternCase::kOneOf); @@ -176,7 +118,7 @@ PathMatcher::createSafeRegex(const envoy::type::matcher::v3::RegexMatcher& regex bool MetadataMatcher::match(const envoy::config::core::v3::Metadata& metadata) const { const auto& value = Envoy::Config::Metadata::metadataValue(&metadata, matcher_.filter(), path_); - return value_matcher_ && value_matcher_->match(value); + return value_matcher_->match(value) ^ matcher_.invert(); } bool PathMatcher::match(const absl::string_view path) const { diff --git a/source/common/common/matchers.h b/source/common/common/matchers.h index 3bcf1a88bf12b..486d8dbb875dc 100644 --- a/source/common/common/matchers.h +++ b/source/common/common/matchers.h @@ -2,6 +2,7 @@ #include +#include "envoy/common/exception.h" #include "envoy/common/matchers.h" #include "envoy/common/regex.h" #include "envoy/config/core/v3/base.pb.h" @@ -11,9 +12,12 @@ #include "envoy/type/matcher/v3/string.pb.h" #include "envoy/type/matcher/v3/value.pb.h" +#include "source/common/common/regex.h" #include "source/common/common/utility.h" #include "source/common/protobuf/protobuf.h" +#include "absl/strings/match.h" + 
namespace Envoy { namespace Matchers { @@ -81,15 +85,55 @@ class UniversalStringMatcher : public StringMatcher { bool match(absl::string_view) const override { return true; } }; +template class StringMatcherImpl : public ValueMatcher, public StringMatcher { public: - explicit StringMatcherImpl(const envoy::type::matcher::v3::StringMatcher& matcher); + explicit StringMatcherImpl(const StringMatcherType& matcher) : matcher_(matcher) { + if (matcher.match_pattern_case() == StringMatcherType::MatchPatternCase::kSafeRegex) { + if (matcher.ignore_case()) { + ExceptionUtil::throwEnvoyException("ignore_case has no effect for safe_regex."); + } + regex_ = Regex::Utility::parseRegex(matcher_.safe_regex()); + } else if (matcher.match_pattern_case() == StringMatcherType::MatchPatternCase::kContains) { + if (matcher_.ignore_case()) { + // Cache the lowercase conversion of the Contains matcher for future use + lowercase_contains_match_ = absl::AsciiStrToLower(matcher_.contains()); + } + } + } // StringMatcher - bool match(const absl::string_view value) const override; - bool match(const ProtobufWkt::Value& value) const override; - - const envoy::type::matcher::v3::StringMatcher& matcher() const { return matcher_; } + bool match(const absl::string_view value) const override { + switch (matcher_.match_pattern_case()) { + case StringMatcherType::MatchPatternCase::kExact: + return matcher_.ignore_case() ? absl::EqualsIgnoreCase(value, matcher_.exact()) + : value == matcher_.exact(); + case StringMatcherType::MatchPatternCase::kPrefix: + return matcher_.ignore_case() ? absl::StartsWithIgnoreCase(value, matcher_.prefix()) + : absl::StartsWith(value, matcher_.prefix()); + case StringMatcherType::MatchPatternCase::kSuffix: + return matcher_.ignore_case() ? absl::EndsWithIgnoreCase(value, matcher_.suffix()) + : absl::EndsWith(value, matcher_.suffix()); + case StringMatcherType::MatchPatternCase::kContains: + return matcher_.ignore_case() + ? 
absl::StrContains(absl::AsciiStrToLower(value), lowercase_contains_match_) + : absl::StrContains(value, matcher_.contains()); + case StringMatcherType::MatchPatternCase::kSafeRegex: + return regex_->match(value); + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + } + bool match(const ProtobufWkt::Value& value) const override { + + if (value.kind_case() != ProtobufWkt::Value::kStringValue) { + return false; + } + + return match(value.string_value()); + } + + const StringMatcherType& matcher() const { return matcher_; } /** * Helps applications optimize the case where a matcher is a case-sensitive @@ -98,10 +142,18 @@ class StringMatcherImpl : public ValueMatcher, public StringMatcher { * @param prefix the returned prefix string * @return true if the matcher is a case-sensitive prefix-match. */ - bool getCaseSensitivePrefixMatch(std::string& prefix) const; + bool getCaseSensitivePrefixMatch(std::string& prefix) const { + if (matcher_.match_pattern_case() == + envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kPrefix && + !matcher_.ignore_case()) { + prefix = matcher_.prefix(); + return true; + } + return false; + } private: - const envoy::type::matcher::v3::StringMatcher matcher_; + const StringMatcherType matcher_; Regex::CompiledMatcherPtr regex_; std::string lowercase_contains_match_; }; @@ -149,7 +201,7 @@ class PathMatcher : public StringMatcher { bool match(const absl::string_view path) const override; private: - const StringMatcherImpl matcher_; + const StringMatcherImpl matcher_; }; } // namespace Matchers diff --git a/source/common/common/regex.cc b/source/common/common/regex.cc index 6b2e0050b487b..0cc34cf7e5e95 100644 --- a/source/common/common/regex.cc +++ b/source/common/common/regex.cc @@ -1,48 +1,29 @@ #include "source/common/common/regex.h" #include "envoy/common/exception.h" -#include "envoy/runtime/runtime.h" #include "envoy/type/matcher/v3/regex.pb.h" #include "source/common/common/assert.h" #include "source/common/common/fmt.h" -#include 
"source/common/protobuf/utility.h" -#include "source/common/stats/symbol_table_impl.h" - -#include "re2/re2.h" namespace Envoy { namespace Regex { -namespace { - -class CompiledGoogleReMatcher : public CompiledMatcher { -public: - CompiledGoogleReMatcher(const envoy::type::matcher::v3::RegexMatcher& config) - : regex_(config.regex(), re2::RE2::Quiet) { - if (!regex_.ok()) { - throw EnvoyException(regex_.error()); - } - - const uint32_t regex_program_size = static_cast(regex_.ProgramSize()); - // Check if the deprecated field max_program_size is set first, and follow the old logic if so. - if (config.google_re2().has_max_program_size()) { - const uint32_t max_program_size = - PROTOBUF_GET_WRAPPED_OR_DEFAULT(config.google_re2(), max_program_size, 100); - if (regex_program_size > max_program_size) { - throw EnvoyException(fmt::format("regex '{}' RE2 program size of {} > max program size of " - "{}. Increase configured max program size if necessary.", - config.regex(), regex_program_size, max_program_size)); - } - return; - } +CompiledGoogleReMatcher::CompiledGoogleReMatcher(const std::string& regex, + bool do_program_size_check) + : regex_(regex, re2::RE2::Quiet) { + if (!regex_.ok()) { + throw EnvoyException(regex_.error()); + } + if (do_program_size_check) { Runtime::Loader* runtime = Runtime::LoaderSingleton::getExisting(); if (runtime) { Stats::Scope& root_scope = runtime->getRootScope(); - // TODO(perf): It would be more efficient to create the stats (program size histogram, warning - // counter) on startup and not with each regex match. + const uint32_t regex_program_size = static_cast(regex_.ProgramSize()); + // TODO(perf): It would be more efficient to create the stats (program size histogram, + // warning counter) on startup and not with each regex match. 
Stats::StatNameManagedStorage program_size_stat_name("re2.program_size", root_scope.symbolTable()); Stats::Histogram& program_size_stat = root_scope.histogramFromStatName( @@ -59,8 +40,7 @@ class CompiledGoogleReMatcher : public CompiledMatcher { throw EnvoyException(fmt::format("regex '{}' RE2 program size of {} > max program size of " "{} set for the error level threshold. Increase " "configured max program size if necessary.", - config.regex(), regex_program_size, - max_program_size_error_level)); + regex, regex_program_size, max_program_size_error_level)); } const uint32_t max_program_size_warn_level = @@ -71,34 +51,27 @@ class CompiledGoogleReMatcher : public CompiledMatcher { warn, "regex '{}' RE2 program size of {} > max program size of {} set for the warn " "level threshold. Increase configured max program size if necessary.", - config.regex(), regex_program_size, max_program_size_warn_level); + regex, regex_program_size, max_program_size_warn_level); } } } +} - // CompiledMatcher - bool match(absl::string_view value) const override { - return re2::RE2::FullMatch(re2::StringPiece(value.data(), value.size()), regex_); - } - - // CompiledMatcher - std::string replaceAll(absl::string_view value, absl::string_view substitution) const override { - std::string result = std::string(value); - re2::RE2::GlobalReplace(&result, regex_, - re2::StringPiece(substitution.data(), substitution.size())); - return result; +CompiledGoogleReMatcher::CompiledGoogleReMatcher( + const envoy::type::matcher::v3::RegexMatcher& config) + : CompiledGoogleReMatcher(config.regex(), !config.google_re2().has_max_program_size()) { + const uint32_t regex_program_size = static_cast(regex_.ProgramSize()); + + // Check if the deprecated field max_program_size is set first, and follow the old logic if so. 
+ if (config.google_re2().has_max_program_size()) { + const uint32_t max_program_size = + PROTOBUF_GET_WRAPPED_OR_DEFAULT(config.google_re2(), max_program_size, 100); + if (regex_program_size > max_program_size) { + throw EnvoyException(fmt::format("regex '{}' RE2 program size of {} > max program size of " + "{}. Increase configured max program size if necessary.", + config.regex(), regex_program_size, max_program_size)); + } } - -private: - const re2::RE2 regex_; -}; - -} // namespace - -CompiledMatcherPtr Utility::parseRegex(const envoy::type::matcher::v3::RegexMatcher& matcher) { - // Google Re is the only currently supported engine. - ASSERT(matcher.has_google_re2()); - return std::make_unique(matcher); } std::regex Utility::parseStdRegex(const std::string& regex, std::regex::flag_type flags) { diff --git a/source/common/common/regex.h b/source/common/common/regex.h index e360cab01b96b..e2aee56170abe 100644 --- a/source/common/common/regex.h +++ b/source/common/common/regex.h @@ -4,11 +4,44 @@ #include #include "envoy/common/regex.h" +#include "envoy/runtime/runtime.h" #include "envoy/type/matcher/v3/regex.pb.h" +#include "source/common/common/assert.h" +#include "source/common/protobuf/utility.h" +#include "source/common/stats/symbol_table_impl.h" + +#include "re2/re2.h" +#include "xds/type/matcher/v3/regex.pb.h" + namespace Envoy { namespace Regex { +class CompiledGoogleReMatcher : public CompiledMatcher { +public: + explicit CompiledGoogleReMatcher(const std::string& regex, bool do_program_size_check); + + explicit CompiledGoogleReMatcher(const xds::type::matcher::v3::RegexMatcher& config) + : CompiledGoogleReMatcher(config.regex(), false) {} + + explicit CompiledGoogleReMatcher(const envoy::type::matcher::v3::RegexMatcher& config); + + // CompiledMatcher + bool match(absl::string_view value) const override { + return re2::RE2::FullMatch(re2::StringPiece(value.data(), value.size()), regex_); + } + + // CompiledMatcher + std::string 
replaceAll(absl::string_view value, absl::string_view substitution) const override { + std::string result = std::string(value); + re2::RE2::GlobalReplace(&result, regex_, + re2::StringPiece(substitution.data(), substitution.size())); + return result; + } + +private: + const re2::RE2 regex_; +}; enum class Type { Re2, StdRegex }; /** @@ -29,7 +62,12 @@ class Utility { /** * Construct a compiled regex matcher from a match config. */ - static CompiledMatcherPtr parseRegex(const envoy::type::matcher::v3::RegexMatcher& matcher); + template + static CompiledMatcherPtr parseRegex(const RegexMatcherType& matcher) { + // Google Re is the only currently supported engine. + ASSERT(matcher.has_google_re2()); + return std::make_unique(matcher); + } }; } // namespace Regex diff --git a/source/common/config/BUILD b/source/common/config/BUILD index 1890b921b93dc..2fd7a8c75748b 100644 --- a/source/common/config/BUILD +++ b/source/common/config/BUILD @@ -8,16 +8,6 @@ licenses(["notice"]) # Apache 2 envoy_package() -envoy_cc_library( - name = "api_type_oracle_lib", - srcs = ["api_type_oracle.cc"], - hdrs = ["api_type_oracle.h"], - deps = [ - "//source/common/protobuf", - "@com_github_cncf_udpa//udpa/annotations:pkg_cc_proto", - ], -) - envoy_cc_library( name = "api_version_lib", hdrs = ["api_version.h"], @@ -112,7 +102,6 @@ envoy_cc_library( "//source/common/common:token_bucket_impl_lib", "//source/common/grpc:common_lib", "//source/common/protobuf", - "@envoy_api//envoy/api/v2:pkg_cc_proto", "@envoy_api//envoy/service/discovery/v3:pkg_cc_proto", ], ) @@ -172,7 +161,6 @@ envoy_cc_library( "//source/common/memory:utils_lib", "//source/common/protobuf", "@com_google_absl//absl/container:btree", - "@envoy_api//envoy/api/v2:pkg_cc_proto", "@envoy_api//envoy/service/discovery/v3:pkg_cc_proto", ], ) @@ -198,14 +186,12 @@ envoy_cc_library( ":delta_subscription_state_lib", ":grpc_stream_lib", ":pausable_ack_queue_lib", - ":version_converter_lib", ":watch_map_lib", ":xds_context_params_lib", 
":xds_resource_lib", "//envoy/event:dispatcher_interface", "//envoy/grpc:async_client_interface", "//source/common/memory:utils_lib", - "@envoy_api//envoy/api/v2:pkg_cc_proto", "@envoy_api//envoy/service/discovery/v3:pkg_cc_proto", ], ) @@ -220,7 +206,6 @@ envoy_cc_library( deps = [ ":api_version_lib", ":decoded_resource_lib", - ":version_converter_lib", "//envoy/config:subscription_interface", "//envoy/event:dispatcher_interface", "//source/common/buffer:buffer_lib", @@ -231,7 +216,6 @@ envoy_cc_library( "//source/common/http:rest_api_fetcher_lib", "//source/common/protobuf", "//source/common/protobuf:utility_lib", - "@envoy_api//envoy/api/v2:pkg_cc_proto", "@envoy_api//envoy/service/discovery/v3:pkg_cc_proto", ], ) @@ -277,15 +261,12 @@ envoy_cc_library( name = "protobuf_link_hacks", hdrs = ["protobuf_link_hacks.h"], deps = [ - "@envoy_api//envoy/api/v2:pkg_cc_proto", - "@envoy_api//envoy/config/bootstrap/v2:pkg_cc_proto", "@envoy_api//envoy/service/cluster/v3:pkg_cc_proto", - "@envoy_api//envoy/service/discovery/v2:pkg_cc_proto", "@envoy_api//envoy/service/discovery/v3:pkg_cc_proto", "@envoy_api//envoy/service/endpoint/v3:pkg_cc_proto", "@envoy_api//envoy/service/extension/v3:pkg_cc_proto", + "@envoy_api//envoy/service/health/v3:pkg_cc_proto", "@envoy_api//envoy/service/listener/v3:pkg_cc_proto", - "@envoy_api//envoy/service/ratelimit/v2:pkg_cc_proto", "@envoy_api//envoy/service/ratelimit/v3:pkg_cc_proto", "@envoy_api//envoy/service/route/v3:pkg_cc_proto", "@envoy_api//envoy/service/runtime/v3:pkg_cc_proto", @@ -309,11 +290,7 @@ envoy_cc_library( envoy_cc_library( name = "resource_name_lib", hdrs = ["resource_name.h"], - deps = [ - ":api_type_oracle_lib", - "//source/common/common:assert_lib", - "@envoy_api//envoy/config/core/v3:pkg_cc_proto", - ], + deps = ["//source/common/common:assert_lib"], ) envoy_cc_library( @@ -390,8 +367,6 @@ envoy_cc_library( srcs = ["utility.cc"], hdrs = ["utility.h"], deps = [ - ":api_type_oracle_lib", - ":version_converter_lib", 
"//envoy/config:grpc_mux_interface", "//envoy/config:subscription_interface", "//envoy/local_info:local_info_interface", @@ -419,20 +394,6 @@ envoy_cc_library( ], ) -envoy_cc_library( - name = "version_converter_lib", - srcs = ["version_converter.cc"], - hdrs = ["version_converter.h"], - deps = [ - ":api_type_oracle_lib", - "//source/common/common:assert_lib", - "//source/common/protobuf", - "//source/common/protobuf:visitor_lib", - "//source/common/protobuf:well_known_lib", - "@envoy_api//envoy/config/core/v3:pkg_cc_proto", - ], -) - envoy_cc_library( name = "watch_map_lib", srcs = ["watch_map.cc"], diff --git a/source/common/config/api_type_oracle.cc b/source/common/config/api_type_oracle.cc deleted file mode 100644 index 66da4f79a9283..0000000000000 --- a/source/common/config/api_type_oracle.cc +++ /dev/null @@ -1,36 +0,0 @@ -#include "source/common/config/api_type_oracle.h" - -#include "udpa/annotations/versioning.pb.h" - -namespace Envoy { -namespace Config { - -const Protobuf::Descriptor* -ApiTypeOracle::getEarlierVersionDescriptor(const std::string& message_type) { - const auto previous_message_string = getEarlierVersionMessageTypeName(message_type); - if (previous_message_string != absl::nullopt) { - const Protobuf::Descriptor* earlier_desc = - Protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName( - previous_message_string.value()); - return earlier_desc; - } else { - return nullptr; - } -} - -const absl::optional -ApiTypeOracle::getEarlierVersionMessageTypeName(const std::string& message_type) { - // Determine if there is an earlier API version for message_type. 
- const Protobuf::Descriptor* desc = - Protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(std::string{message_type}); - if (desc == nullptr) { - return absl::nullopt; - } - if (desc->options().HasExtension(udpa::annotations::versioning)) { - return desc->options().GetExtension(udpa::annotations::versioning).previous_message_type(); - } - return absl::nullopt; -} - -} // namespace Config -} // namespace Envoy diff --git a/source/common/config/api_type_oracle.h b/source/common/config/api_type_oracle.h deleted file mode 100644 index 7320309b33ccb..0000000000000 --- a/source/common/config/api_type_oracle.h +++ /dev/null @@ -1,29 +0,0 @@ -#pragma once - -#include "source/common/protobuf/protobuf.h" - -#include "absl/strings/string_view.h" -#include "absl/types/optional.h" - -namespace Envoy { -namespace Config { - -class ApiTypeOracle { -public: - /** - * Based on a given message, determine if there exists an earlier version of - * this message. If so, return the descriptor for the earlier - * message, to support upgrading via VersionConverter::upgrade(). - * - * @param message_type protobuf message type - * @return const Protobuf::Descriptor* descriptor for earlier message version - * corresponding to message, if any, otherwise nullptr. 
- */ - static const Protobuf::Descriptor* getEarlierVersionDescriptor(const std::string& message_type); - - static const absl::optional - getEarlierVersionMessageTypeName(const std::string& message_type); -}; - -} // namespace Config -} // namespace Envoy diff --git a/source/common/config/delta_subscription_state.h b/source/common/config/delta_subscription_state.h index cafdbe0022ee5..9765a6736dc3a 100644 --- a/source/common/config/delta_subscription_state.h +++ b/source/common/config/delta_subscription_state.h @@ -1,6 +1,5 @@ #pragma once -#include "envoy/api/v2/discovery.pb.h" #include "envoy/config/subscription.h" #include "envoy/event/dispatcher.h" #include "envoy/grpc/status.h" diff --git a/source/common/config/grpc_mux_impl.cc b/source/common/config/grpc_mux_impl.cc index 060f0c845796b..4242599901a77 100644 --- a/source/common/config/grpc_mux_impl.cc +++ b/source/common/config/grpc_mux_impl.cc @@ -4,7 +4,6 @@ #include "source/common/config/decoded_resource_impl.h" #include "source/common/config/utility.h" -#include "source/common/config/version_converter.h" #include "source/common/memory/utils.h" #include "source/common/protobuf/protobuf.h" @@ -17,28 +16,18 @@ namespace Config { namespace { class AllMuxesState { public: - void insert(GrpcMuxImpl* mux) { - absl::WriterMutexLock locker(&lock_); - muxes_.insert(mux); - } + void insert(GrpcMuxImpl* mux) { muxes_.insert(mux); } - void erase(GrpcMuxImpl* mux) { - absl::WriterMutexLock locker(&lock_); - muxes_.erase(mux); - } + void erase(GrpcMuxImpl* mux) { muxes_.erase(mux); } void shutdownAll() { - absl::WriterMutexLock locker(&lock_); for (auto& mux : muxes_) { mux->shutdown(); } } private: - absl::flat_hash_set muxes_ ABSL_GUARDED_BY(lock_); - - // TODO(ggreenway): can this lock be removed? Is this code only run on the main thread? 
- absl::Mutex lock_; + absl::flat_hash_set muxes_; }; using AllMuxes = ThreadSafeSingleton; } // namespace @@ -46,14 +35,12 @@ using AllMuxes = ThreadSafeSingleton; GrpcMuxImpl::GrpcMuxImpl(const LocalInfo::LocalInfo& local_info, Grpc::RawAsyncClientPtr async_client, Event::Dispatcher& dispatcher, const Protobuf::MethodDescriptor& service_method, - envoy::config::core::v3::ApiVersion transport_api_version, Random::RandomGenerator& random, Stats::Scope& scope, const RateLimitSettings& rate_limit_settings, bool skip_subsequent_node) : grpc_stream_(this, std::move(async_client), service_method, random, dispatcher, scope, rate_limit_settings), local_info_(local_info), skip_subsequent_node_(skip_subsequent_node), - first_stream_request_(true), transport_api_version_(transport_api_version), - dispatcher_(dispatcher), + first_stream_request_(true), dispatcher_(dispatcher), dynamic_update_callback_handle_(local_info.contextProvider().addDynamicContextUpdateCallback( [this](absl::string_view resource_type_url) { onDynamicContextUpdate(resource_type_url); @@ -104,7 +91,6 @@ void GrpcMuxImpl::sendDiscoveryRequest(const std::string& type_url) { } else { request.clear_node(); } - VersionConverter::prepareMessageForGrpcWire(request, transport_api_version_); ENVOY_LOG(trace, "Sending DiscoveryRequest for {}: {}", type_url, request.ShortDebugString()); grpc_stream_.sendMessage(request); first_stream_request_ = false; diff --git a/source/common/config/grpc_mux_impl.h b/source/common/config/grpc_mux_impl.h index 57b4946099373..e9d1a61828c7a 100644 --- a/source/common/config/grpc_mux_impl.h +++ b/source/common/config/grpc_mux_impl.h @@ -4,7 +4,6 @@ #include #include -#include "envoy/api/v2/discovery.pb.h" #include "envoy/common/random_generator.h" #include "envoy/common/time.h" #include "envoy/config/grpc_mux.h" @@ -35,7 +34,6 @@ class GrpcMuxImpl : public GrpcMux, public: GrpcMuxImpl(const LocalInfo::LocalInfo& local_info, Grpc::RawAsyncClientPtr async_client, Event::Dispatcher& 
dispatcher, const Protobuf::MethodDescriptor& service_method, - envoy::config::core::v3::ApiVersion transport_api_version, Random::RandomGenerator& random, Stats::Scope& scope, const RateLimitSettings& rate_limit_settings, bool skip_subsequent_node); @@ -186,7 +184,6 @@ class GrpcMuxImpl : public GrpcMux, // store them; rather, they are simply dropped. This string is a type // URL. std::unique_ptr> request_queue_; - const envoy::config::core::v3::ApiVersion transport_api_version_; Event::Dispatcher& dispatcher_; Common::CallbackHandlePtr dynamic_update_callback_handle_; diff --git a/source/common/config/http_subscription_impl.cc b/source/common/config/http_subscription_impl.cc index ecf6afffa9b98..76b201444f5b6 100644 --- a/source/common/config/http_subscription_impl.cc +++ b/source/common/config/http_subscription_impl.cc @@ -10,7 +10,6 @@ #include "source/common/common/utility.h" #include "source/common/config/decoded_resource_impl.h" #include "source/common/config/utility.h" -#include "source/common/config/version_converter.h" #include "source/common/http/headers.h" #include "source/common/protobuf/protobuf.h" #include "source/common/protobuf/utility.h" @@ -25,15 +24,15 @@ HttpSubscriptionImpl::HttpSubscriptionImpl( const std::string& remote_cluster_name, Event::Dispatcher& dispatcher, Random::RandomGenerator& random, std::chrono::milliseconds refresh_interval, std::chrono::milliseconds request_timeout, const Protobuf::MethodDescriptor& service_method, - absl::string_view type_url, envoy::config::core::v3::ApiVersion transport_api_version, - SubscriptionCallbacks& callbacks, OpaqueResourceDecoder& resource_decoder, - SubscriptionStats stats, std::chrono::milliseconds init_fetch_timeout, + absl::string_view type_url, SubscriptionCallbacks& callbacks, + OpaqueResourceDecoder& resource_decoder, SubscriptionStats stats, + std::chrono::milliseconds init_fetch_timeout, ProtobufMessage::ValidationVisitor& validation_visitor) : Http::RestApiFetcher(cm, 
remote_cluster_name, dispatcher, random, refresh_interval, request_timeout), callbacks_(callbacks), resource_decoder_(resource_decoder), stats_(stats), dispatcher_(dispatcher), init_fetch_timeout_(init_fetch_timeout), - validation_visitor_(validation_visitor), transport_api_version_(transport_api_version) { + validation_visitor_(validation_visitor) { request_.mutable_node()->CopyFrom(local_info.node()); request_.set_type_url(std::string(type_url)); ASSERT(service_method.options().HasExtension(google::api::http)); @@ -74,7 +73,7 @@ void HttpSubscriptionImpl::createRequest(Http::RequestMessage& request) { stats_.update_attempt_.inc(); request.headers().setReferenceMethod(Http::Headers::get().MethodValues.Post); request.headers().setPath(path_); - request.body().add(VersionConverter::getJsonStringFromMessage(request_, transport_api_version_)); + request.body().add(MessageUtil::getJsonStringFromMessageOrDie(request_)); request.headers().setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); request.headers().setContentLength(request.body().length()); } diff --git a/source/common/config/http_subscription_impl.h b/source/common/config/http_subscription_impl.h index 4672e45894882..cd6de9a46a95d 100644 --- a/source/common/config/http_subscription_impl.h +++ b/source/common/config/http_subscription_impl.h @@ -1,6 +1,5 @@ #pragma once -#include "envoy/api/v2/discovery.pb.h" #include "envoy/common/random_generator.h" #include "envoy/config/subscription.h" #include "envoy/event/dispatcher.h" @@ -28,7 +27,6 @@ class HttpSubscriptionImpl : public Http::RestApiFetcher, Random::RandomGenerator& random, std::chrono::milliseconds refresh_interval, std::chrono::milliseconds request_timeout, const Protobuf::MethodDescriptor& service_method, absl::string_view type_url, - envoy::config::core::v3::ApiVersion transport_api_version, SubscriptionCallbacks& callbacks, OpaqueResourceDecoder& resource_decoder, SubscriptionStats stats, std::chrono::milliseconds 
init_fetch_timeout, ProtobufMessage::ValidationVisitor& validation_visitor); @@ -61,7 +59,6 @@ class HttpSubscriptionImpl : public Http::RestApiFetcher, std::chrono::milliseconds init_fetch_timeout_; Event::TimerPtr init_fetch_timeout_timer_; ProtobufMessage::ValidationVisitor& validation_visitor_; - const envoy::config::core::v3::ApiVersion transport_api_version_; }; } // namespace Config diff --git a/source/common/config/new_grpc_mux_impl.cc b/source/common/config/new_grpc_mux_impl.cc index d0e3db537d0b9..085edec3150d1 100644 --- a/source/common/config/new_grpc_mux_impl.cc +++ b/source/common/config/new_grpc_mux_impl.cc @@ -6,7 +6,6 @@ #include "source/common/common/backoff_strategy.h" #include "source/common/common/token_bucket_impl.h" #include "source/common/config/utility.h" -#include "source/common/config/version_converter.h" #include "source/common/config/xds_context_params.h" #include "source/common/config/xds_resource.h" #include "source/common/memory/utils.h" @@ -19,28 +18,18 @@ namespace Config { namespace { class AllMuxesState { public: - void insert(NewGrpcMuxImpl* mux) { - absl::WriterMutexLock locker(&lock_); - muxes_.insert(mux); - } + void insert(NewGrpcMuxImpl* mux) { muxes_.insert(mux); } - void erase(NewGrpcMuxImpl* mux) { - absl::WriterMutexLock locker(&lock_); - muxes_.erase(mux); - } + void erase(NewGrpcMuxImpl* mux) { muxes_.erase(mux); } void shutdownAll() { - absl::WriterMutexLock locker(&lock_); for (auto& mux : muxes_) { mux->shutdown(); } } private: - absl::flat_hash_set muxes_ ABSL_GUARDED_BY(lock_); - - // TODO(ggreenway): can this lock be removed? Is this code only run on the main thread? 
- absl::Mutex lock_; + absl::flat_hash_set muxes_; }; using AllMuxes = ThreadSafeSingleton; } // namespace @@ -48,7 +37,6 @@ using AllMuxes = ThreadSafeSingleton; NewGrpcMuxImpl::NewGrpcMuxImpl(Grpc::RawAsyncClientPtr&& async_client, Event::Dispatcher& dispatcher, const Protobuf::MethodDescriptor& service_method, - envoy::config::core::v3::ApiVersion transport_api_version, Random::RandomGenerator& random, Stats::Scope& scope, const RateLimitSettings& rate_limit_settings, const LocalInfo::LocalInfo& local_info) @@ -59,7 +47,7 @@ NewGrpcMuxImpl::NewGrpcMuxImpl(Grpc::RawAsyncClientPtr&& async_client, [this](absl::string_view resource_type_url) { onDynamicContextUpdate(resource_type_url); })), - transport_api_version_(transport_api_version), dispatcher_(dispatcher) { + dispatcher_(dispatcher) { AllMuxes::get().insert(this); } @@ -282,7 +270,6 @@ void NewGrpcMuxImpl::trySendDiscoveryRequests() { } else { request = sub->second->sub_state_.getNextRequestAckless(); } - VersionConverter::prepareMessageForGrpcWire(request, transport_api_version_); grpc_stream_.sendMessage(request); } grpc_stream_.maybeUpdateQueueSizeStat(pausable_ack_queue_.size()); diff --git a/source/common/config/new_grpc_mux_impl.h b/source/common/config/new_grpc_mux_impl.h index 98ded0dec357b..5c2940ebfe8e6 100644 --- a/source/common/config/new_grpc_mux_impl.h +++ b/source/common/config/new_grpc_mux_impl.h @@ -2,7 +2,6 @@ #include -#include "envoy/api/v2/discovery.pb.h" #include "envoy/common/random_generator.h" #include "envoy/common/token_bucket.h" #include "envoy/config/grpc_mux.h" @@ -32,10 +31,8 @@ class NewGrpcMuxImpl Logger::Loggable { public: NewGrpcMuxImpl(Grpc::RawAsyncClientPtr&& async_client, Event::Dispatcher& dispatcher, - const Protobuf::MethodDescriptor& service_method, - envoy::config::core::v3::ApiVersion transport_api_version, - Random::RandomGenerator& random, Stats::Scope& scope, - const RateLimitSettings& rate_limit_settings, + const Protobuf::MethodDescriptor& service_method, 
Random::RandomGenerator& random, + Stats::Scope& scope, const RateLimitSettings& rate_limit_settings, const LocalInfo::LocalInfo& local_info); ~NewGrpcMuxImpl() override; @@ -179,7 +176,6 @@ class NewGrpcMuxImpl const LocalInfo::LocalInfo& local_info_; Common::CallbackHandlePtr dynamic_update_callback_handle_; - const envoy::config::core::v3::ApiVersion transport_api_version_; Event::Dispatcher& dispatcher_; // True iff Envoy is shutting down; no messages should be sent on the `grpc_stream_` when this is diff --git a/source/common/config/protobuf_link_hacks.h b/source/common/config/protobuf_link_hacks.h index de6e816e37b9a..991291789ef9a 100644 --- a/source/common/config/protobuf_link_hacks.h +++ b/source/common/config/protobuf_link_hacks.h @@ -1,21 +1,11 @@ #pragma once -#include "envoy/api/v2/cds.pb.h" -#include "envoy/api/v2/eds.pb.h" -#include "envoy/api/v2/lds.pb.h" -#include "envoy/api/v2/rds.pb.h" -#include "envoy/api/v2/srds.pb.h" -#include "envoy/config/bootstrap/v2/bootstrap.pb.h" #include "envoy/service/cluster/v3/cds.pb.h" -#include "envoy/service/discovery/v2/ads.pb.h" -#include "envoy/service/discovery/v2/hds.pb.h" -#include "envoy/service/discovery/v2/rtds.pb.h" -#include "envoy/service/discovery/v2/sds.pb.h" #include "envoy/service/discovery/v3/ads.pb.h" #include "envoy/service/endpoint/v3/eds.pb.h" #include "envoy/service/extension/v3/config_discovery.pb.h" +#include "envoy/service/health/v3/hds.pb.h" #include "envoy/service/listener/v3/lds.pb.h" -#include "envoy/service/ratelimit/v2/rls.pb.h" #include "envoy/service/ratelimit/v3/rls.pb.h" #include "envoy/service/route/v3/rds.pb.h" #include "envoy/service/route/v3/srds.pb.h" @@ -26,18 +16,6 @@ namespace Envoy { -// Hack to force linking of the service: https://github.com/google/protobuf/issues/4221. -// This file should be included ONLY if this hack is required. 
-const envoy::service::discovery::v2::AdsDummy _ads_dummy_v2; -const envoy::service::ratelimit::v2::RateLimitRequest _rls_dummy_v2; -const envoy::service::discovery::v2::SdsDummy _sds_dummy_v2; -const envoy::service::discovery::v2::RtdsDummy _tds_dummy_v2; -const envoy::api::v2::LdsDummy _lds_dummy_v2; -const envoy::api::v2::RdsDummy _rds_dummy_v2; -const envoy::api::v2::CdsDummy _cds_dummy_v2; -const envoy::api::v2::EdsDummy _eds_dummy_v2; -const envoy::api::v2::SrdsDummy _srds_dummy_v2; - const envoy::service::discovery::v3::AdsDummy _ads_dummy_v3; const envoy::service::ratelimit::v3::RateLimitRequest _rls_dummy_v3; const envoy::service::secret::v3::SdsDummy _sds_dummy_v3; @@ -48,19 +26,7 @@ const envoy::service::cluster::v3::CdsDummy _cds_dummy_v3; const envoy::service::endpoint::v3::EdsDummy _eds_dummy_v3; const envoy::service::route::v3::SrdsDummy _srds_dummy_v3; const envoy::service::extension::v3::EcdsDummy _ecds_dummy_v3; - -// With the v2 -> v3 migration there is another, related linking issue. -// Symbols for v2 protos which headers are not included in any file in the codebase are being -// dropped by the linker in some circumstances. For example, in the Envoy Mobile iOS build system. -// Even though all v2 packages are included as a dependency in their corresponding v3 package, and -// `always_link` is set for all proto bazel targets. -// Further proof of this can be seen by way of counter example with the envoy.api.v2.Cluster type, -// which is checked for by proto_descriptors.cc. This type **is** getting linked because its headers -// is still included in cds_api_impl.cc. On the other side because the v2 hds header is not included -// anywhere the v2 service type is getting dropped, and thus the descriptor is not present in the -// descriptor pool. 
-// https://github.com/envoyproxy/envoy/issues/9639 -const envoy::config::bootstrap::v2::Bootstrap _bootstrap_dummy_v2; -const envoy::service::discovery::v2::Capability _hds_dummy_v2; +const envoy::service::runtime::v3::RtdsDummy _rtds_dummy_v3; +const envoy::service::health::v3::HdsDummy _hds_dummy_v3; } // namespace Envoy diff --git a/source/common/config/resource_name.h b/source/common/config/resource_name.h index b3ca8e8175266..16d84e97bd6a7 100644 --- a/source/common/config/resource_name.h +++ b/source/common/config/resource_name.h @@ -3,58 +3,21 @@ #include #include -#include "envoy/config/core/v3/config_source.pb.h" - -#include "source/common/common/assert.h" -#include "source/common/config/api_type_oracle.h" - namespace Envoy { namespace Config { /** - * Get resource name from api type and version. - */ -template -std::string getResourceName(envoy::config::core::v3::ApiVersion resource_api_version) { - switch (resource_api_version) { - case envoy::config::core::v3::ApiVersion::AUTO: - case envoy::config::core::v3::ApiVersion::V2: - return ApiTypeOracle::getEarlierVersionMessageTypeName(Current().GetDescriptor()->full_name()) - .value(); - case envoy::config::core::v3::ApiVersion::V3: - return Current().GetDescriptor()->full_name(); - default: - NOT_REACHED_GCOVR_EXCL_LINE; - } -} - -/** - * Get type url from api type and version. - */ -template -std::string getTypeUrl(envoy::config::core::v3::ApiVersion resource_api_version) { - return "type.googleapis.com/" + getResourceName(resource_api_version); -} - -/** - * get all version resource names. + * Get resource name from api type. */ -template std::vector getAllVersionResourceNames() { - return std::vector{ - Current().GetDescriptor()->full_name(), - ApiTypeOracle::getEarlierVersionMessageTypeName(Current().GetDescriptor()->full_name()) - .value()}; +template std::string getResourceName() { + return Current().GetDescriptor()->full_name(); } /** - * get all version type urls. + * Get type url from api type. 
*/ -template std::vector getAllVersionTypeUrls() { - auto resource_names = getAllVersionResourceNames(); - for (auto&& resource_name : resource_names) { - resource_name = "type.googleapis.com/" + resource_name; - } - return resource_names; +template std::string getTypeUrl() { + return "type.googleapis.com/" + getResourceName(); } } // namespace Config diff --git a/source/common/config/subscription_base.h b/source/common/config/subscription_base.h index 447931d1c36b1..7207c26582e84 100644 --- a/source/common/config/subscription_base.h +++ b/source/common/config/subscription_base.h @@ -10,20 +10,14 @@ namespace Config { template struct SubscriptionBase : public Config::SubscriptionCallbacks { public: - SubscriptionBase(const envoy::config::core::v3::ApiVersion api_version, - ProtobufMessage::ValidationVisitor& validation_visitor, + SubscriptionBase(ProtobufMessage::ValidationVisitor& validation_visitor, absl::string_view name_field) - : resource_decoder_(validation_visitor, name_field), api_version_(api_version) {} + : resource_decoder_(validation_visitor, name_field) {} - std::string getResourceName() const { - return Envoy::Config::getResourceName(api_version_); - } + std::string getResourceName() const { return Envoy::Config::getResourceName(); } protected: Config::OpaqueResourceDecoderImpl resource_decoder_; - -private: - const envoy::config::core::v3::ApiVersion api_version_; }; } // namespace Config diff --git a/source/common/config/subscription_factory_impl.cc b/source/common/config/subscription_factory_impl.cc index e21ece1f106bf..74f5fb439e3e1 100644 --- a/source/common/config/subscription_factory_impl.cc +++ b/source/common/config/subscription_factory_impl.cc @@ -41,7 +41,8 @@ SubscriptionPtr SubscriptionFactoryImpl::subscriptionFromConfigSource( const envoy::config::core::v3::ApiConfigSource& api_config_source = config.api_config_source(); Utility::checkApiConfigSourceSubscriptionBackingCluster(cm_.primaryClusters(), api_config_source); - const auto 
transport_api_version = Utility::getAndCheckTransportVersion(api_config_source); + Utility::checkTransportVersion(api_config_source); + const auto transport_api_version = envoy::config::core::v3::ApiVersion::V3; switch (api_config_source.api_type()) { case envoy::config::core::v3::ApiConfigSource::hidden_envoy_deprecated_UNSUPPORTED_REST_LEGACY: throw EnvoyException( @@ -53,9 +54,8 @@ SubscriptionPtr SubscriptionFactoryImpl::subscriptionFromConfigSource( local_info_, cm_, api_config_source.cluster_names()[0], dispatcher_, api_.randomGenerator(), Utility::apiConfigSourceRefreshDelay(api_config_source), Utility::apiConfigSourceRequestTimeout(api_config_source), - restMethod(type_url, transport_api_version), type_url, transport_api_version, callbacks, - resource_decoder, stats, Utility::configSourceInitialFetchTimeout(config), - validation_visitor_); + restMethod(type_url, transport_api_version), type_url, callbacks, resource_decoder, stats, + Utility::configSourceInitialFetchTimeout(config), validation_visitor_); case envoy::config::core::v3::ApiConfigSource::GRPC: return std::make_unique( std::make_shared( @@ -63,8 +63,8 @@ SubscriptionPtr SubscriptionFactoryImpl::subscriptionFromConfigSource( Utility::factoryForGrpcApiConfigSource(cm_.grpcAsyncClientManager(), api_config_source, scope, true) ->createUncachedRawAsyncClient(), - dispatcher_, sotwGrpcMethod(type_url, transport_api_version), transport_api_version, - api_.randomGenerator(), scope, Utility::parseRateLimitSettings(api_config_source), + dispatcher_, sotwGrpcMethod(type_url, transport_api_version), api_.randomGenerator(), + scope, Utility::parseRateLimitSettings(api_config_source), api_config_source.set_node_on_first_message_only()), callbacks, resource_decoder, stats, type_url, dispatcher_, Utility::configSourceInitialFetchTimeout(config), @@ -75,9 +75,8 @@ SubscriptionPtr SubscriptionFactoryImpl::subscriptionFromConfigSource( Config::Utility::factoryForGrpcApiConfigSource(cm_.grpcAsyncClientManager(), 
api_config_source, scope, true) ->createUncachedRawAsyncClient(), - dispatcher_, deltaGrpcMethod(type_url, transport_api_version), transport_api_version, - api_.randomGenerator(), scope, Utility::parseRateLimitSettings(api_config_source), - local_info_), + dispatcher_, deltaGrpcMethod(type_url, transport_api_version), api_.randomGenerator(), + scope, Utility::parseRateLimitSettings(api_config_source), local_info_), callbacks, resource_decoder, stats, type_url, dispatcher_, Utility::configSourceInitialFetchTimeout(config), /*is_aggregated*/ false, options); } @@ -135,8 +134,8 @@ SubscriptionPtr SubscriptionFactoryImpl::collectionSubscriptionFromUrl( api_config_source, scope, true) ->createUncachedRawAsyncClient(), dispatcher_, deltaGrpcMethod(type_url, envoy::config::core::v3::ApiVersion::V3), - envoy::config::core::v3::ApiVersion::V3, api_.randomGenerator(), scope, - Utility::parseRateLimitSettings(api_config_source), local_info_), + api_.randomGenerator(), scope, Utility::parseRateLimitSettings(api_config_source), + local_info_), callbacks, resource_decoder, stats, dispatcher_, Utility::configSourceInitialFetchTimeout(config), false, options); } diff --git a/source/common/config/type_to_endpoint.cc b/source/common/config/type_to_endpoint.cc index 2ac9af5b27f54..1add1d4661ef0 100644 --- a/source/common/config/type_to_endpoint.cc +++ b/source/common/config/type_to_endpoint.cc @@ -6,9 +6,6 @@ // API_NO_BOOST_FILE -#define SERVICE_VERSION_INFO(v2, v3) \ - createServiceVersionInfoMap(v2, {v2, v3}), createServiceVersionInfoMap(v3, {v2, v3}) - namespace Envoy { namespace Config { @@ -18,58 +15,6 @@ namespace { // "envoy.service.route.v3.RouteDiscoveryService". 
using ServiceName = std::string; -struct ServiceVersionInfo { - // This hold a name for each transport_api_version, for example for - // "envoy.api.v2.RouteDiscoveryService": - // { - // "V2": "envoy.api.v2.RouteDiscoveryService", - // "V3": "envoy.service.route.v3.RouteDiscoveryService" - // } - absl::flat_hash_map names_; -}; - -// A ServiceVersionInfoMap holds a service's transport_api_version and possible names for each -// available transport_api_version. For examples: -// -// Given "envoy.api.v2.RouteDiscoveryService" as the service name: -// { -// "envoy.api.v2.RouteDiscoveryService": { -// "names_": { -// "V2": "envoy.api.v2.RouteDiscoveryService", -// "V3": "envoy.service.route.v3.RouteDiscoveryService" -// } -// } -// } -// -// And for "envoy.service.route.v3.RouteDiscoveryService": -// { -// "envoy.service.route.v3.RouteDiscoveryService": -// "names_": { -// "V2": "envoy.api.v2.RouteDiscoveryService", -// "V3": "envoy.service.route.v3.RouteDiscoveryService" -// } -// } -// } -using ServiceVersionInfoMap = absl::flat_hash_map; - -// This creates a ServiceVersionInfoMap, with service name (For example: -// "envoy.api.v2.RouteDiscoveryService") as the key. -ServiceVersionInfoMap -createServiceVersionInfoMap(absl::string_view service_name, - const std::array& versioned_service_names) { - const auto key = static_cast(service_name); - return ServiceVersionInfoMap{{ - // ServiceName as the key. - key, - - // ServiceVersionInfo as the value. - ServiceVersionInfo{{ - {envoy::config::core::v3::ApiVersion::V2, versioned_service_names[0]}, - {envoy::config::core::v3::ApiVersion::V3, versioned_service_names[1]}, - }}, - }}; -} - // A resource type URL. For example: "type.googleapis.com/envoy.api.v2.RouteConfiguration". using TypeUrl = std::string; @@ -86,177 +31,90 @@ TypeUrl getResourceTypeUrl(absl::string_view service_name) { // A method name, e.g. "envoy.api.v2.RouteDiscoveryService.StreamRoutes". 
using MethodName = std::string; -struct VersionedDiscoveryType { - // A map of transport_api_version to discovery service RPC method fully qualified names. e.g. - // { - // "V2": "envoy.api.v2.RouteDiscoveryService.StreamRoutes", - // "V3": "envoy.service.route.v3.RouteDiscoveryService.StreamRoutes" - // } - absl::flat_hash_map methods_; +// This holds discovery types. +struct V3Service { + MethodName sotw_grpc_; + MethodName delta_grpc_; + MethodName rest_; }; -// This holds versioned discovery types. -struct VersionedService { - VersionedDiscoveryType sotw_grpc_; - VersionedDiscoveryType delta_grpc_; - VersionedDiscoveryType rest_; -}; - -using TypeUrlToVersionedServiceMap = absl::flat_hash_map; +using TypeUrlToV3ServiceMap = absl::flat_hash_map; -// buildTypeUrlToServiceMap() builds a reverse map from a resource type URLs to a versioned service -// (by transport_api_version). -// -// The way we build it is by firstly constructing a list of ServiceVersionInfoMap: -// [ -// { -// "envoy.api.v2.RouteDiscoveryService": { -// "names_": { -// "V2": "envoy.api.v2.RouteDiscoveryService", -// "V3": "envoy.service.route.v3.RouteDiscoveryService" -// } -// } -// }, -// { -// "envoy.service.route.v3.RouteDiscoveryService": { -// "names_": { -// "V2": "envoy.api.v2.RouteDiscoveryService", -// "V3": "envoy.service.route.v3.RouteDiscoveryService" -// } -// } -// } -// ... -// ] -// -// Then we convert it into the following map, with the inferred resource type URL as the key: -// -// { -// "type.googleapis.com/envoy.api.v2.RouteConfiguration": { -// "sotw_grpc_": { -// "methods_": { -// "V2": "envoy.api.v2.RouteDiscoveryService.StreamRoutes", -// "V3": "envoy.service.route.v3.RouteDiscoveryService.StreamRoutes" -// } -// }, -// ... 
-// }, -// "type.googleapis.com/envoy.config.route.v3.RouteConfiguration": { -// "sotw_grpc_": { -// "methods_": { -// "V2": "envoy.api.v2.RouteDiscoveryService.StreamRoutes", -// "V3": "envoy.service.route.v3.RouteDiscoveryService.StreamRoutes" -// } -// }, -// ... -// } -// } -// -TypeUrlToVersionedServiceMap* buildTypeUrlToServiceMap() { - auto* type_url_to_versioned_service_map = new TypeUrlToVersionedServiceMap(); +TypeUrlToV3ServiceMap* buildTypeUrlToServiceMap() { + auto* type_url_to_versioned_service_map = new TypeUrlToV3ServiceMap(); // This happens once in the lifetime of Envoy. We build a reverse map from resource type URL to // service methods (versioned by transport_api_version). We explicitly enumerate all services, // since DescriptorPool doesn't support iterating over all descriptors, due its lazy load design, // see https://www.mail-archive.com/protobuf@googlegroups.com/msg04540.html. - for (const ServiceVersionInfoMap& registered : { - SERVICE_VERSION_INFO("envoy.api.v2.RouteDiscoveryService", - "envoy.service.route.v3.RouteDiscoveryService"), - SERVICE_VERSION_INFO("envoy.api.v2.ScopedRoutesDiscoveryService", - "envoy.service.route.v3.ScopedRoutesDiscoveryService"), - SERVICE_VERSION_INFO("envoy.api.v2.ScopedRoutesDiscoveryService", - "envoy.service.route.v3.ScopedRoutesDiscoveryService"), - SERVICE_VERSION_INFO("envoy.api.v2.VirtualHostDiscoveryService", - "envoy.service.route.v3.VirtualHostDiscoveryService"), - SERVICE_VERSION_INFO("envoy.service.discovery.v2.SecretDiscoveryService", - "envoy.service.secret.v3.SecretDiscoveryService"), - SERVICE_VERSION_INFO("envoy.api.v2.ClusterDiscoveryService", - "envoy.service.cluster.v3.ClusterDiscoveryService"), - SERVICE_VERSION_INFO("envoy.api.v2.EndpointDiscoveryService", - "envoy.service.endpoint.v3.EndpointDiscoveryService"), - SERVICE_VERSION_INFO("envoy.api.v2.ListenerDiscoveryService", - "envoy.service.listener.v3.ListenerDiscoveryService"), - 
SERVICE_VERSION_INFO("envoy.service.discovery.v2.RuntimeDiscoveryService", - "envoy.service.runtime.v3.RuntimeDiscoveryService"), - ServiceVersionInfoMap{{ - "envoy.service.extension.v3.ExtensionConfigDiscoveryService", - ServiceVersionInfo{{ - {envoy::config::core::v3::ApiVersion::V3, - "envoy.service.extension.v3.ExtensionConfigDiscoveryService"}, - }}, - }}, + for (absl::string_view name : { + "envoy.service.route.v3.RouteDiscoveryService", + "envoy.service.route.v3.ScopedRoutesDiscoveryService", + "envoy.service.route.v3.ScopedRoutesDiscoveryService", + "envoy.service.route.v3.VirtualHostDiscoveryService", + "envoy.service.secret.v3.SecretDiscoveryService", + "envoy.service.cluster.v3.ClusterDiscoveryService", + "envoy.service.endpoint.v3.EndpointDiscoveryService", + "envoy.service.listener.v3.ListenerDiscoveryService", + "envoy.service.runtime.v3.RuntimeDiscoveryService", + "envoy.service.extension.v3.ExtensionConfigDiscoveryService", }) { - for (const auto& [registered_service_name, registered_service_info] : registered) { - const TypeUrl resource_type_url = getResourceTypeUrl(registered_service_name); - VersionedService& service = (*type_url_to_versioned_service_map)[resource_type_url]; - - for (const auto& [transport_api_version, service_name] : registered_service_info.names_) { - const auto* service_desc = - Protobuf::DescriptorPool::generated_pool()->FindServiceByName(service_name); - ASSERT(service_desc != nullptr, fmt::format("{} missing", service_name)); - ASSERT(service_desc->options().HasExtension(envoy::annotations::resource)); - - // We populate the service methods that are known below, but it's possible that some - // services don't implement all, e.g. VHDS doesn't support SotW or REST. 
- for (int method_index = 0; method_index < service_desc->method_count(); ++method_index) { - const auto& method_desc = *service_desc->method(method_index); - if (absl::StartsWith(method_desc.name(), "Stream")) { - service.sotw_grpc_.methods_[transport_api_version] = method_desc.full_name(); - } else if (absl::StartsWith(method_desc.name(), "Delta")) { - service.delta_grpc_.methods_[transport_api_version] = method_desc.full_name(); - } else if (absl::StartsWith(method_desc.name(), "Fetch")) { - service.rest_.methods_[transport_api_version] = method_desc.full_name(); - } else { - ASSERT(false, "Unknown xDS service method"); - } - } + const TypeUrl resource_type_url = getResourceTypeUrl(name); + V3Service& service = (*type_url_to_versioned_service_map)[resource_type_url]; + + const auto* service_desc = + Protobuf::DescriptorPool::generated_pool()->FindServiceByName(std::string(name)); + ASSERT(service_desc != nullptr, fmt::format("{} missing", name)); + ASSERT(service_desc->options().HasExtension(envoy::annotations::resource)); + + // We populate the service methods that are known below, but it's possible that some + // services don't implement all, e.g. VHDS doesn't support SotW or REST. 
+ for (int method_index = 0; method_index < service_desc->method_count(); ++method_index) { + const auto& method_desc = *service_desc->method(method_index); + if (absl::StartsWith(method_desc.name(), "Stream")) { + service.sotw_grpc_ = method_desc.full_name(); + } else if (absl::StartsWith(method_desc.name(), "Delta")) { + service.delta_grpc_ = method_desc.full_name(); + } else if (absl::StartsWith(method_desc.name(), "Fetch")) { + service.rest_ = method_desc.full_name(); + } else { + ASSERT(false, "Unknown xDS service method"); } } } return type_url_to_versioned_service_map; } -TypeUrlToVersionedServiceMap& typeUrlToVersionedServiceMap() { - static TypeUrlToVersionedServiceMap* type_url_to_versioned_service_map = - buildTypeUrlToServiceMap(); +TypeUrlToV3ServiceMap& typeUrlToV3ServiceMap() { + static TypeUrlToV3ServiceMap* type_url_to_versioned_service_map = buildTypeUrlToServiceMap(); return *type_url_to_versioned_service_map; } -envoy::config::core::v3::ApiVersion -effectiveTransportApiVersion(envoy::config::core::v3::ApiVersion transport_api_version) { - // By default (when the transport_api_version is "AUTO"), the effective transport_api_version is - // envoy::config::core::v3::ApiVersion::V2. - if (transport_api_version == envoy::config::core::v3::ApiVersion::AUTO) { - return envoy::config::core::v3::ApiVersion::V2; - } - return transport_api_version; -} - } // namespace +// TODO(alyssawilk) clean up transport_api_version argument. 
const Protobuf::MethodDescriptor& deltaGrpcMethod(absl::string_view type_url, - envoy::config::core::v3::ApiVersion transport_api_version) { - const auto it = typeUrlToVersionedServiceMap().find(static_cast(type_url)); - ASSERT(it != typeUrlToVersionedServiceMap().cend()); - return *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( - it->second.delta_grpc_.methods_[effectiveTransportApiVersion(transport_api_version)]); + envoy::config::core::v3::ApiVersion /*transport_api_version*/) { + const auto it = typeUrlToV3ServiceMap().find(static_cast(type_url)); + ASSERT(it != typeUrlToV3ServiceMap().cend()); + return *Protobuf::DescriptorPool::generated_pool()->FindMethodByName(it->second.delta_grpc_); } const Protobuf::MethodDescriptor& sotwGrpcMethod(absl::string_view type_url, - envoy::config::core::v3::ApiVersion transport_api_version) { - const auto it = typeUrlToVersionedServiceMap().find(static_cast(type_url)); - ASSERT(it != typeUrlToVersionedServiceMap().cend()); - return *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( - it->second.sotw_grpc_.methods_[effectiveTransportApiVersion(transport_api_version)]); + envoy::config::core::v3::ApiVersion /*transport_api_version*/) { + const auto it = typeUrlToV3ServiceMap().find(static_cast(type_url)); + ASSERT(it != typeUrlToV3ServiceMap().cend()); + return *Protobuf::DescriptorPool::generated_pool()->FindMethodByName(it->second.sotw_grpc_); } const Protobuf::MethodDescriptor& -restMethod(absl::string_view type_url, envoy::config::core::v3::ApiVersion transport_api_version) { - const auto it = typeUrlToVersionedServiceMap().find(static_cast(type_url)); - ASSERT(it != typeUrlToVersionedServiceMap().cend()); - return *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( - it->second.rest_.methods_[effectiveTransportApiVersion(transport_api_version)]); +restMethod(absl::string_view type_url, + envoy::config::core::v3::ApiVersion /*transport_api_version*/) { + const auto it = 
typeUrlToV3ServiceMap().find(static_cast(type_url)); + ASSERT(it != typeUrlToV3ServiceMap().cend()); + return *Protobuf::DescriptorPool::generated_pool()->FindMethodByName(it->second.rest_); } } // namespace Config diff --git a/source/common/config/utility.cc b/source/common/config/utility.cc index 89b752be1940a..912ea6cb88458 100644 --- a/source/common/config/utility.cc +++ b/source/common/config/utility.cc @@ -13,8 +13,6 @@ #include "source/common/common/fmt.h" #include "source/common/common/hex.h" #include "source/common/common/utility.h" -#include "source/common/config/api_type_oracle.h" -#include "source/common/config/version_converter.h" #include "source/common/config/well_known_names.h" #include "source/common/protobuf/protobuf.h" #include "source/common/protobuf/utility.h" @@ -93,8 +91,7 @@ void Utility::checkFilesystemSubscriptionBackingPath(const std::string& path, Ap // watch addition. if (!api.fileSystem().fileExists(path)) { throw EnvoyException(fmt::format( - "envoy::api::v2::Path must refer to an existing path in the system: '{}' does not exist", - path)); + "paths must refer to an existing path in the system: '{}' does not exist", path)); } } @@ -252,7 +249,6 @@ Grpc::AsyncClientFactoryPtr Utility::factoryForGrpcApiConfigSource( } void Utility::translateOpaqueConfig(const ProtobufWkt::Any& typed_config, - const ProtobufWkt::Struct& config, ProtobufMessage::ValidationVisitor& validation_visitor, Protobuf::Message& out_proto) { static const std::string struct_type = @@ -285,10 +281,6 @@ void Utility::translateOpaqueConfig(const ProtobufWkt::Any& typed_config, MessageUtil::jsonConvert(struct_config, validation_visitor, out_proto); } } - - if (!config.fields().empty()) { - MessageUtil::jsonConvert(config, validation_visitor, out_proto); - } } } // namespace Config diff --git a/source/common/config/utility.h b/source/common/config/utility.h index e4227ff06caec..1d0603a1e45d2 100644 --- a/source/common/config/utility.h +++ b/source/common/config/utility.h 
@@ -185,15 +185,11 @@ class Utility { const envoy::config::core::v3::ApiConfigSource& api_config_source); /** - * Access transport_api_version field in ApiConfigSource, while validating version - * compatibility. + * Validate transport_api_version field in ApiConfigSource. * @param api_config_source the config source to extract transport API version from. - * @return envoy::config::core::v3::ApiVersion transport API version * @throws DeprecatedMajorVersionException when the transport version is disabled. */ - template - static envoy::config::core::v3::ApiVersion - getAndCheckTransportVersion(const Proto& api_config_source) { + template static void checkTransportVersion(const Proto& api_config_source) { const auto transport_api_version = api_config_source.transport_api_version(); ASSERT(Thread::MainThread::isMainThread()); if (transport_api_version == envoy::config::core::v3::ApiVersion::AUTO || @@ -206,12 +202,8 @@ class Utility { "following the advice in https://www.envoyproxy.io/docs/envoy/latest/faq/api/transition.", api_config_source.DebugString()); ENVOY_LOG_MISC(warn, warning); - if (!Runtime::runtimeFeatureEnabled( - "envoy.test_only.broken_in_production.enable_deprecated_v2_api")) { - throw DeprecatedMajorVersionException(warning); - } + throw DeprecatedMajorVersionException(warning); } - return transport_api_version; } /** @@ -383,9 +375,7 @@ class Utility { // Check that the config type is not google.protobuf.Empty RELEASE_ASSERT(config->GetDescriptor()->full_name() != "google.protobuf.Empty", ""); - translateOpaqueConfig(enclosing_message.typed_config(), - enclosing_message.hidden_envoy_deprecated_config(), validation_visitor, - *config); + translateOpaqueConfig(enclosing_message.typed_config(), validation_visitor, *config); return config; } @@ -409,7 +399,7 @@ class Utility { // Check that the config type is not google.protobuf.Empty RELEASE_ASSERT(config->GetDescriptor()->full_name() != "google.protobuf.Empty", ""); - 
translateOpaqueConfig(typed_config, ProtobufWkt::Struct(), validation_visitor, *config); + translateOpaqueConfig(typed_config, validation_visitor, *config); return config; } @@ -453,15 +443,12 @@ class Utility { Stats::Scope& scope, bool skip_cluster_check); /** - * Translate opaque config from google.protobuf.Any or google.protobuf.Struct to defined proto - * message. + * Translate opaque config from google.protobuf.Any to defined proto message. * @param typed_config opaque config packed in google.protobuf.Any - * @param config the deprecated google.protobuf.Struct config, empty struct if doesn't exist. * @param validation_visitor message validation visitor instance. * @param out_proto the proto message instantiated by extensions */ static void translateOpaqueConfig(const ProtobufWkt::Any& typed_config, - const ProtobufWkt::Struct& config, ProtobufMessage::ValidationVisitor& validation_visitor, Protobuf::Message& out_proto); diff --git a/source/common/config/version_converter.cc b/source/common/config/version_converter.cc deleted file mode 100644 index be6bfa25b5ac3..0000000000000 --- a/source/common/config/version_converter.cc +++ /dev/null @@ -1,232 +0,0 @@ -#include "source/common/config/version_converter.h" - -#include "envoy/common/exception.h" - -#include "source/common/common/assert.h" -#include "source/common/common/macros.h" -#include "source/common/config/api_type_oracle.h" -#include "source/common/protobuf/visitor.h" -#include "source/common/protobuf/well_known.h" - -#include "absl/strings/match.h" - -namespace Envoy { -namespace Config { - -namespace { - -class ProtoVisitor { -public: - virtual ~ProtoVisitor() = default; - - // Invoked when a field is visited, with the message, field descriptor and - // context. Returns a new context for use when traversing the sub-message in a - // field. 
- virtual const void* onField(Protobuf::Message&, const Protobuf::FieldDescriptor&, - const void* ctxt) { - return ctxt; - } - - // Invoked when a message is visited, with the message and a context. - virtual void onMessage(Protobuf::Message&, const void*){}; -}; - -// Reinterpret a Protobuf message as another Protobuf message by converting to wire format and back. -// This only works for messages that can be effectively duck typed this way, e.g. with a subtype -// relationship modulo field name. -void wireCast(const Protobuf::Message& src, Protobuf::Message& dst) { - // This should should generally succeed, but if there are malformed UTF-8 strings in a message, - // this can fail. - if (!dst.ParseFromString(src.SerializeAsString())) { - throw EnvoyException("Unable to deserialize during wireCast()"); - } -} - -// Create a new dynamic message based on some message wire cast to the target -// descriptor. If the descriptor is null, a copy is performed. -DynamicMessagePtr createForDescriptorWithCast(const Protobuf::Message& message, - const Protobuf::Descriptor* desc) { - auto dynamic_message = std::make_unique(); - if (desc != nullptr) { - dynamic_message->msg_.reset(dynamic_message->dynamic_msg_factory_.GetPrototype(desc)->New()); - wireCast(message, *dynamic_message->msg_); - return dynamic_message; - } - // Unnecessary copy, since the existing message is being treated as - // "dynamic". However, we want to transfer an owned object, so this is the - // best we can do. - dynamic_message->msg_.reset(message.New()); - dynamic_message->msg_->MergeFrom(message); - return dynamic_message; -} - -} // namespace - -void VersionConverter::upgrade(const Protobuf::Message& prev_message, - Protobuf::Message& next_message) { - wireCast(prev_message, next_message); - // Track original type to support recoverOriginal(). 
- annotateWithOriginalType(*prev_message.GetDescriptor(), next_message); -} - -// This needs to be recursive, since sub-messages are consumed and stored -// internally, we later want to recover their original types. -void VersionConverter::annotateWithOriginalType(const Protobuf::Descriptor& prev_descriptor, - Protobuf::Message& upgraded_message) { - class TypeAnnotatingProtoVisitor : public ProtobufMessage::ProtoVisitor { - public: - void onMessage(Protobuf::Message& message, const void* ctxt) override { - const Protobuf::Descriptor* descriptor = message.GetDescriptor(); - const Protobuf::Reflection* reflection = message.GetReflection(); - const Protobuf::Descriptor* prev_descriptor = static_cast(ctxt); - // If there is no previous descriptor for this message, we don't need to annotate anything. - if (prev_descriptor == nullptr) { - return; - } - // If they are the same type, there's no possibility of any different type - // further down, so we're done. - if (descriptor->full_name() == prev_descriptor->full_name()) { - return; - } - auto* unknown_field_set = reflection->MutableUnknownFields(&message); - unknown_field_set->AddLengthDelimited(ProtobufWellKnown::OriginalTypeFieldNumber, - prev_descriptor->full_name()); - } - - const void* onField(Protobuf::Message&, const Protobuf::FieldDescriptor& field, - const void* ctxt) override { - const Protobuf::Descriptor* prev_descriptor = static_cast(ctxt); - // If there is no previous descriptor for this field, we don't need to annotate anything. - if (prev_descriptor == nullptr) { - return nullptr; - } - // TODO(htuch): This is a terrible hack, there should be no per-resource - // business logic in this file. The reason this is required is that - // endpoints, when captured in configuration such as inlined hosts in - // Clusters for config dump purposes, can potentially contribute a - // significant amount to memory consumption. 
stats_integration_test - // complains as a result if we increase any memory due to type annotations. - // In theory, we should be able to just clean up these annotations in - // ClusterManagerImpl with type erasure, but protobuf doesn't free up memory - // as expected, we probably need some arena level trick to address this. - if (prev_descriptor->full_name() == "envoy.api.v2.Cluster" && - field.name() == "load_assignment") { - // This will cause the sub-message visit to abort early. - return field.message_type(); - } - const Protobuf::FieldDescriptor* prev_field = - prev_descriptor->FindFieldByNumber(field.number()); - return prev_field != nullptr ? prev_field->message_type() : nullptr; - } - }; - TypeAnnotatingProtoVisitor proto_visitor; - ProtobufMessage::traverseMutableMessage(proto_visitor, upgraded_message, &prev_descriptor); -} - -void VersionConverter::eraseOriginalTypeInformation(Protobuf::Message& message) { - class TypeErasingProtoVisitor : public ProtobufMessage::ProtoVisitor { - public: - void onMessage(Protobuf::Message& message, const void*) override { - const Protobuf::Reflection* reflection = message.GetReflection(); - auto* unknown_field_set = reflection->MutableUnknownFields(&message); - unknown_field_set->DeleteByNumber(ProtobufWellKnown::OriginalTypeFieldNumber); - } - }; - TypeErasingProtoVisitor proto_visitor; - ProtobufMessage::traverseMutableMessage(proto_visitor, message, nullptr); -} - -DynamicMessagePtr VersionConverter::recoverOriginal(const Protobuf::Message& upgraded_message) { - const Protobuf::Reflection* reflection = upgraded_message.GetReflection(); - const auto& unknown_field_set = reflection->GetUnknownFields(upgraded_message); - for (int i = 0; i < unknown_field_set.field_count(); ++i) { - const auto& unknown_field = unknown_field_set.field(i); - if (unknown_field.number() == ProtobufWellKnown::OriginalTypeFieldNumber) { - ASSERT(unknown_field.type() == Protobuf::UnknownField::TYPE_LENGTH_DELIMITED); - const std::string& 
original_type = unknown_field.length_delimited(); - const Protobuf::Descriptor* original_descriptor = - Protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(original_type); - auto result = createForDescriptorWithCast(upgraded_message, original_descriptor); - // We should clear out the OriginalTypeFieldNumber in the recovered message. - eraseOriginalTypeInformation(*result->msg_); - return result; - } - } - return createForDescriptorWithCast(upgraded_message, nullptr); -} - -DynamicMessagePtr VersionConverter::downgrade(const Protobuf::Message& message) { - const Protobuf::Descriptor* prev_desc = - ApiTypeOracle::getEarlierVersionDescriptor(message.GetDescriptor()->full_name()); - return createForDescriptorWithCast(message, prev_desc); -} - -std::string -VersionConverter::getJsonStringFromMessage(const Protobuf::Message& message, - envoy::config::core::v3::ApiVersion api_version) { - DynamicMessagePtr dynamic_message; - switch (api_version) { - case envoy::config::core::v3::ApiVersion::AUTO: - FALLTHRU; - case envoy::config::core::v3::ApiVersion::V2: { - // TODO(htuch): this works as long as there are no new fields in the v3+ - // DiscoveryRequest. When they are added, we need to do a full v2 conversion - // and also discard unknown fields. Tracked at - // https://github.com/envoyproxy/envoy/issues/9619. - dynamic_message = downgrade(message); - break; - } - case envoy::config::core::v3::ApiVersion::V3: { - // We need to scrub the hidden fields. 
- dynamic_message = std::make_unique(); - dynamic_message->msg_.reset(message.New()); - dynamic_message->msg_->MergeFrom(message); - VersionUtil::scrubHiddenEnvoyDeprecated(*dynamic_message->msg_); - break; - } - default: - NOT_REACHED_GCOVR_EXCL_LINE; - } - eraseOriginalTypeInformation(*dynamic_message->msg_); - std::string json; - Protobuf::util::JsonPrintOptions json_options; - json_options.preserve_proto_field_names = true; - const auto status = - Protobuf::util::MessageToJsonString(*dynamic_message->msg_, &json, json_options); - // This should always succeed unless something crash-worthy such as out-of-memory. - RELEASE_ASSERT(status.ok(), ""); - return json; -} - -void VersionConverter::prepareMessageForGrpcWire(Protobuf::Message& message, - envoy::config::core::v3::ApiVersion api_version) { - // TODO(htuch): this works as long as there are no new fields in the v3+ - // DiscoveryRequest. When they are added, we need to do a full v2 conversion - // and also discard unknown fields. Tracked at - // https://github.com/envoyproxy/envoy/issues/9619. 
- if (api_version == envoy::config::core::v3::ApiVersion::V3) { - VersionUtil::scrubHiddenEnvoyDeprecated(message); - } - eraseOriginalTypeInformation(message); -} - -void VersionUtil::scrubHiddenEnvoyDeprecated(Protobuf::Message& message) { - class HiddenFieldScrubbingProtoVisitor : public ProtobufMessage::ProtoVisitor { - public: - const void* onField(Protobuf::Message& message, const Protobuf::FieldDescriptor& field, - const void*) override { - const Protobuf::Reflection* reflection = message.GetReflection(); - if (absl::StartsWith(field.name(), DeprecatedFieldShadowPrefix)) { - reflection->ClearField(&message, &field); - } - return nullptr; - } - }; - HiddenFieldScrubbingProtoVisitor proto_visitor; - ProtobufMessage::traverseMutableMessage(proto_visitor, message, nullptr); -} - -const char VersionUtil::DeprecatedFieldShadowPrefix[] = "hidden_envoy_deprecated_"; - -} // namespace Config -} // namespace Envoy diff --git a/source/common/config/version_converter.h b/source/common/config/version_converter.h deleted file mode 100644 index a7f5db3e9d8a9..0000000000000 --- a/source/common/config/version_converter.h +++ /dev/null @@ -1,133 +0,0 @@ -#pragma once - -#include "envoy/config/core/v3/config_source.pb.h" - -#include "source/common/protobuf/protobuf.h" - -// Convenience macro for downgrading a message and obtaining a reference. -#define API_DOWNGRADE(msg) (*Envoy::Config::VersionConverter::downgrade(msg)->msg_) - -// Convenience macro for recovering original message and obtaining a reference. -#define API_RECOVER_ORIGINAL(msg) (*Envoy::Config::VersionConverter::recoverOriginal(msg)->msg_) - -namespace Envoy { -namespace Config { - -// An instance of a dynamic message from a DynamicMessageFactory. -struct DynamicMessage { - // The dynamic message factory must outlive the message. - Protobuf::DynamicMessageFactory dynamic_msg_factory_; - - // Dynamic message. 
- ProtobufTypes::MessagePtr msg_; -}; - -using DynamicMessagePtr = std::unique_ptr; - -class VersionConverter { -public: - /** - * Upgrade a message from an earlier to later version of the Envoy API. This - * performs a simple wire-level reinterpretation of the fields. As a result of - * shadow protos, earlier deprecated fields such as foo are materialized as - * hidden_envoy_deprecated_foo. - * - * This should be used when you have wire input (e.g. bootstrap, xDS, some - * opaque config) that might be at any supported version and want to upgrade - * to Envoy's internal latest API usage. - * - * @param prev_message previous version message input. - * @param next_message next version message to generate. - * - * @throw EnvoyException if a Protobuf (de)serialization error occurs. - */ - static void upgrade(const Protobuf::Message& prev_message, Protobuf::Message& next_message); - - /** - * Downgrade a message to the previous version. If no previous version exists, - * the given message is copied in the return value. This is not super - * efficient, most uses are expected to be tests and performance agnostic - * code. - * - * This is used primarily in tests, to allow tests to internally use the - * latest supported API but ensure that earlier versions are used on the wire. - * - * @param message message input. - * @return DynamicMessagePtr with the downgraded message (and associated - * factory state). - * - * @throw EnvoyException if a Protobuf (de)serialization error occurs. - */ - static DynamicMessagePtr downgrade(const Protobuf::Message& message); - - /** - * Obtain JSON wire representation for an Envoy internal API message at v3 - * based on a given transport API version. This will downgrade() to an earlier - * version or scrub the shadow deprecated fields in the existing one. - * - * This is typically used when Envoy is generating a JSON wire message from - * some internally generated message, e.g. 
DiscoveryRequest, and we want to - * ensure it matches a specific API version. For example, a v3 - * DiscoveryRequest must have any deprecated v2 fields removed (they only - * exist because of shadowing) and a v2 DiscoveryRequest needs to have type - * envoy.api.v2.DiscoveryRequest to ensure JSON representations have the - * correct field names (after renames/deprecations are reversed). - * - * @param message message input. - * @param api_version target API version. - * @return std::string JSON representation. - */ - static std::string getJsonStringFromMessage(const Protobuf::Message& message, - envoy::config::core::v3::ApiVersion api_version); - - /** - * Modify a v3 message to make it suitable for sending as a gRPC message. This - * requires that a v3 message has hidden_envoy_deprecated_* fields removed, - * and that for all versions that original type information is removed. - * - * @param message message to modify. - * @param api_version target API version. - */ - static void prepareMessageForGrpcWire(Protobuf::Message& message, - envoy::config::core::v3::ApiVersion api_version); - - /** - * Annotate an upgraded message with original message type information. - * - * @param prev_descriptor descriptor for original type. - * @param upgraded_message upgraded message. - */ - static void annotateWithOriginalType(const Protobuf::Descriptor& prev_descriptor, - Protobuf::Message& upgraded_message); - - /** - * For a message that may have been upgraded, recover the original message. - * This is useful for config dump, debug output etc. - * - * @param upgraded_message upgraded message input. - * - * @return DynamicMessagePtr original message (as a dynamic message). - * - * @throw EnvoyException if a Protobuf (de)serialization error occurs. - */ - static DynamicMessagePtr recoverOriginal(const Protobuf::Message& upgraded_message); - - /** - * Remove original type information, when it's not needed, e.g. in tests. - * - * @param message upgraded message to scrub. 
- */ - static void eraseOriginalTypeInformation(Protobuf::Message& message); -}; - -class VersionUtil { -public: - // Some helpers for working with earlier message version deprecated fields. - static void scrubHiddenEnvoyDeprecated(Protobuf::Message& message); - - // A prefix that is added to deprecated fields names upon shadowing. - static const char DeprecatedFieldShadowPrefix[]; -}; - -} // namespace Config -} // namespace Envoy diff --git a/source/common/config/xds_mux/BUILD b/source/common/config/xds_mux/BUILD index f934e46e19dc4..323f17a5d8bb3 100644 --- a/source/common/config/xds_mux/BUILD +++ b/source/common/config/xds_mux/BUILD @@ -51,3 +51,22 @@ envoy_cc_library( "@envoy_api//envoy/service/discovery/v3:pkg_cc_proto", ], ) + +envoy_cc_library( + name = "grpc_mux_lib", + srcs = ["grpc_mux_impl.cc"], + hdrs = ["grpc_mux_impl.h"], + deps = [ + ":delta_subscription_state_lib", + ":sotw_subscription_state_lib", + "//envoy/event:dispatcher_interface", + "//envoy/grpc:async_client_interface", + "//source/common/config:grpc_stream_lib", + "//source/common/config:pausable_ack_queue_lib", + "//source/common/config:watch_map_lib", + "//source/common/config:xds_context_params_lib", + "//source/common/config:xds_resource_lib", + "//source/common/memory:utils_lib", + "@envoy_api//envoy/service/discovery/v3:pkg_cc_proto", + ], +) diff --git a/source/common/config/xds_mux/delta_subscription_state.h b/source/common/config/xds_mux/delta_subscription_state.h index 801bd5edc0c1a..ced0c9fd52f05 100644 --- a/source/common/config/xds_mux/delta_subscription_state.h +++ b/source/common/config/xds_mux/delta_subscription_state.h @@ -93,6 +93,20 @@ class DeltaSubscriptionState std::set names_removed_; }; +class DeltaSubscriptionStateFactory : public SubscriptionStateFactory { +public: + DeltaSubscriptionStateFactory(Event::Dispatcher& dispatcher) : dispatcher_(dispatcher) {} + ~DeltaSubscriptionStateFactory() override = default; + std::unique_ptr + makeSubscriptionState(const 
std::string& type_url, UntypedConfigUpdateCallbacks& callbacks, + OpaqueResourceDecoder&, const bool wildcard) override { + return std::make_unique(type_url, callbacks, dispatcher_, wildcard); + } + +private: + Event::Dispatcher& dispatcher_; +}; + } // namespace XdsMux } // namespace Config } // namespace Envoy diff --git a/source/common/config/xds_mux/grpc_mux_impl.cc b/source/common/config/xds_mux/grpc_mux_impl.cc new file mode 100644 index 0000000000000..c2eba80b4ad81 --- /dev/null +++ b/source/common/config/xds_mux/grpc_mux_impl.cc @@ -0,0 +1,401 @@ +#include "source/common/config/xds_mux/grpc_mux_impl.h" + +#include "envoy/service/discovery/v3/discovery.pb.h" + +#include "source/common/common/assert.h" +#include "source/common/common/backoff_strategy.h" +#include "source/common/config/decoded_resource_impl.h" +#include "source/common/config/utility.h" +#include "source/common/config/xds_context_params.h" +#include "source/common/config/xds_resource.h" +#include "source/common/memory/utils.h" +#include "source/common/protobuf/protobuf.h" +#include "source/common/protobuf/utility.h" + +namespace Envoy { +namespace Config { +namespace XdsMux { + +namespace { +class AllMuxesState { +public: + void insert(ShutdownableMux* mux) { muxes_.insert(mux); } + + void erase(ShutdownableMux* mux) { muxes_.erase(mux); } + + void shutdownAll() { + for (auto& mux : muxes_) { + mux->shutdown(); + } + } + +private: + absl::flat_hash_set muxes_; +}; +using AllMuxes = ThreadSafeSingleton; +} // namespace + +template +GrpcMuxImpl::GrpcMuxImpl(std::unique_ptr subscription_state_factory, + bool skip_subsequent_node, + const LocalInfo::LocalInfo& local_info, + envoy::config::core::v3::ApiVersion transport_api_version, + Grpc::RawAsyncClientPtr&& async_client, + Event::Dispatcher& dispatcher, + const Protobuf::MethodDescriptor& service_method, + Random::RandomGenerator& random, Stats::Scope& scope, + const RateLimitSettings& rate_limit_settings) + : grpc_stream_(this, 
std::move(async_client), service_method, random, dispatcher, scope, + rate_limit_settings), + subscription_state_factory_(std::move(subscription_state_factory)), + skip_subsequent_node_(skip_subsequent_node), local_info_(local_info), + dynamic_update_callback_handle_(local_info.contextProvider().addDynamicContextUpdateCallback( + [this](absl::string_view resource_type_url) { + onDynamicContextUpdate(resource_type_url); + })), + transport_api_version_(transport_api_version) { + Config::Utility::checkLocalInfo("ads", local_info); + AllMuxes::get().insert(this); +} + +template GrpcMuxImpl::~GrpcMuxImpl() { + AllMuxes::get().erase(this); +} + +template void GrpcMuxImpl::shutdownAll() { + AllMuxes::get().shutdownAll(); +} + +template +void GrpcMuxImpl::onDynamicContextUpdate(absl::string_view resource_type_url) { + ENVOY_LOG(debug, "GrpcMuxImpl::onDynamicContextUpdate for {}", resource_type_url); + auto sub = subscriptions_.find(resource_type_url); + if (sub == subscriptions_.end()) { + return; + } + sub->second->setDynamicContextChanged(); + trySendDiscoveryRequests(); +} + +template +Config::GrpcMuxWatchPtr GrpcMuxImpl::addWatch( + const std::string& type_url, const absl::flat_hash_set& resources, + SubscriptionCallbacks& callbacks, OpaqueResourceDecoder& resource_decoder, + const SubscriptionOptions& options) { + auto watch_map = watch_maps_.find(type_url); + if (watch_map == watch_maps_.end()) { + // We don't yet have a subscription for type_url! Make one! + watch_map = + watch_maps_.emplace(type_url, std::make_unique(options.use_namespace_matching_)) + .first; + subscriptions_.emplace( + type_url, subscription_state_factory_->makeSubscriptionState( + type_url, *watch_maps_[type_url], resource_decoder, resources.empty())); + subscription_ordering_.emplace_back(type_url); + } + + Watch* watch = watch_map->second->addWatch(callbacks, resource_decoder); + // updateWatch() queues a discovery request if any of 'resources' are not yet subscribed. 
+ updateWatch(type_url, watch, resources, options); + return std::make_unique(type_url, watch, *this, options); +} + +// Updates the list of resource names watched by the given watch. If an added name is new across +// the whole subscription, or if a removed name has no other watch interested in it, then the +// subscription will enqueue and attempt to send an appropriate discovery request. +template +void GrpcMuxImpl::updateWatch(const std::string& type_url, Watch* watch, + const absl::flat_hash_set& resources, + const SubscriptionOptions& options) { + ENVOY_LOG(debug, "GrpcMuxImpl::updateWatch for {}", type_url); + ASSERT(watch != nullptr); + auto& sub = subscriptionStateFor(type_url); + WatchMap& watch_map = watchMapFor(type_url); + + // We need to prepare xdstp:// resources for the transport, by normalizing and adding any extra + // context parameters. + absl::flat_hash_set effective_resources; + for (const auto& resource : resources) { + if (XdsResourceIdentifier::hasXdsTpScheme(resource)) { + auto xdstp_resource = XdsResourceIdentifier::decodeUrn(resource); + if (options.add_xdstp_node_context_params_) { + const auto context = XdsContextParams::encodeResource( + local_info_.contextProvider().nodeContext(), xdstp_resource.context(), {}, {}); + xdstp_resource.mutable_context()->CopyFrom(context); + } + XdsResourceIdentifier::EncodeOptions encode_options; + encode_options.sort_context_params_ = true; + effective_resources.insert(XdsResourceIdentifier::encodeUrn(xdstp_resource, encode_options)); + } else { + effective_resources.insert(resource); + } + } + + auto added_removed = watch_map.updateWatchInterest(watch, effective_resources); + if (options.use_namespace_matching_) { + // This is to prevent sending out of requests that contain prefixes instead of resource names + sub.updateSubscriptionInterest({}, {}); + } else { + sub.updateSubscriptionInterest(added_removed.added_, added_removed.removed_); + } + + // Tell the server about our change in interest, if 
any. + if (sub.subscriptionUpdatePending()) { + trySendDiscoveryRequests(); + } +} + +template +void GrpcMuxImpl::removeWatch(const std::string& type_url, Watch* watch) { + updateWatch(type_url, watch, {}, {}); + watchMapFor(type_url).removeWatch(watch); +} + +template +ScopedResume GrpcMuxImpl::pause(const std::string& type_url) { + return pause(std::vector{type_url}); +} + +template +ScopedResume GrpcMuxImpl::pause(const std::vector type_urls) { + for (const auto& type_url : type_urls) { + pausable_ack_queue_.pause(type_url); + } + + return std::make_unique([this, type_urls]() { + for (const auto& type_url : type_urls) { + pausable_ack_queue_.resume(type_url); + trySendDiscoveryRequests(); + } + }); +} + +template +void GrpcMuxImpl::sendGrpcMessage(RQ& msg_proto, S& sub_state) { + if (sub_state.dynamicContextChanged() || !anyRequestSentYetInCurrentStream() || + !skipSubsequentNode()) { + msg_proto.mutable_node()->CopyFrom(localInfo().node()); + } + sendMessage(msg_proto); + setAnyRequestSentYetInCurrentStream(true); + sub_state.clearDynamicContextChanged(); +} + +template +void GrpcMuxImpl::genericHandleResponse(const std::string& type_url, + const RS& response_proto, + ControlPlaneStats& control_plane_stats) { + auto sub = subscriptions_.find(type_url); + if (sub == subscriptions_.end()) { + ENVOY_LOG(warn, + "The server sent an xDS response proto with type_url {}, which we have " + "not subscribed to. 
Ignoring.", + type_url); + return; + } + + if (response_proto.has_control_plane()) { + control_plane_stats.identifier_.set(response_proto.control_plane().identifier()); + } + + if (response_proto.control_plane().identifier() != sub->second->controlPlaneIdentifier()) { + sub->second->setControlPlaneIdentifier(response_proto.control_plane().identifier()); + ENVOY_LOG(debug, "Receiving gRPC updates for {} from {}", response_proto.type_url(), + sub->second->controlPlaneIdentifier()); + } + + pausable_ack_queue_.push(sub->second->handleResponse(response_proto)); + trySendDiscoveryRequests(); + Memory::Utils::tryShrinkHeap(); +} + +template void GrpcMuxImpl::start() { + ENVOY_LOG(debug, "GrpcMuxImpl now trying to establish a stream"); + grpc_stream_.establishNewStream(); +} + +template +void GrpcMuxImpl::handleEstablishedStream() { + ENVOY_LOG(debug, "GrpcMuxImpl stream successfully established"); + for (auto& [type_url, subscription_state] : subscriptions_) { + subscription_state->markStreamFresh(); + } + setAnyRequestSentYetInCurrentStream(false); + maybeUpdateQueueSizeStat(0); + pausable_ack_queue_.clear(); + trySendDiscoveryRequests(); +} + +template +void GrpcMuxImpl::handleStreamEstablishmentFailure() { + ENVOY_LOG(debug, "GrpcMuxImpl stream failed to establish"); + // If this happens while Envoy is still initializing, the onConfigUpdateFailed() we ultimately + // call on CDS will cause LDS to start up, which adds to subscriptions_ here. So, to avoid a + // crash, the iteration needs to dance around a little: collect pointers to all + // SubscriptionStates, call on all those pointers we haven't yet called on, repeat if there are + // now more SubscriptionStates. 
+ absl::flat_hash_map all_subscribed; + absl::flat_hash_map already_called; + do { + for (auto& [type_url, subscription_state] : subscriptions_) { + all_subscribed[type_url] = subscription_state.get(); + } + for (auto& sub : all_subscribed) { + if (already_called.insert(sub).second) { // insert succeeded ==> not already called + sub.second->handleEstablishmentFailure(); + } + } + } while (all_subscribed.size() != subscriptions_.size()); +} + +template +S& GrpcMuxImpl::subscriptionStateFor(const std::string& type_url) { + auto sub = subscriptions_.find(type_url); + RELEASE_ASSERT(sub != subscriptions_.end(), + fmt::format("Tried to look up SubscriptionState for non-existent subscription {}.", + type_url)); + return *sub->second; +} + +template +WatchMap& GrpcMuxImpl::watchMapFor(const std::string& type_url) { + auto watch_map = watch_maps_.find(type_url); + RELEASE_ASSERT( + watch_map != watch_maps_.end(), + fmt::format("Tried to look up WatchMap for non-existent subscription {}.", type_url)); + return *watch_map->second; +} + +template +void GrpcMuxImpl::trySendDiscoveryRequests() { + if (shutdown_) { + return; + } + + while (true) { + // Do any of our subscriptions even want to send a request? + absl::optional request_type_if_any = whoWantsToSendDiscoveryRequest(); + if (!request_type_if_any.has_value()) { + break; + } + // If so, which one (by type_url)? + std::string next_request_type_url = request_type_if_any.value(); + auto& sub = subscriptionStateFor(next_request_type_url); + ENVOY_LOG(debug, "GrpcMuxImpl wants to send discovery request for {}", next_request_type_url); + // Try again later if paused/rate limited/stream down. + if (!canSendDiscoveryRequest(next_request_type_url)) { + break; + } + std::unique_ptr request; + // Get our subscription state to generate the appropriate discovery request, and send. 
+ if (!pausable_ack_queue_.empty()) { + // Because ACKs take precedence over plain requests, if there is anything in the queue, it's + // safe to assume it's of the type_url that we're wanting to send. + // + // getNextRequestWithAck() returns a raw unowned pointer, which sendGrpcMessage deletes. + request = sub.getNextRequestWithAck(pausable_ack_queue_.popFront()); + ENVOY_LOG(debug, "GrpcMuxImpl sent ACK discovery request for {}", next_request_type_url); + } else { + // Returns a raw unowned pointer, which sendGrpcMessage deletes. + request = sub.getNextRequestAckless(); + ENVOY_LOG(debug, "GrpcMuxImpl sent non-ACK discovery request for {}", next_request_type_url); + } + ENVOY_LOG(debug, "GrpcMuxImpl skip_subsequent_node: {}", skipSubsequentNode()); + sendGrpcMessage(*request, sub); + } + maybeUpdateQueueSizeStat(pausable_ack_queue_.size()); +} + +// Checks whether external conditions allow sending a discovery request. (Does not check +// whether we *want* to send a discovery request). +template +bool GrpcMuxImpl::canSendDiscoveryRequest(const std::string& type_url) { + RELEASE_ASSERT( + !pausable_ack_queue_.paused(type_url), + fmt::format("canSendDiscoveryRequest() called on paused type_url {}. Pausedness is " + "supposed to be filtered out by whoWantsToSendDiscoveryRequest(). ", + type_url)); + + if (!grpcStreamAvailable()) { + ENVOY_LOG(trace, "No stream available to send a discovery request for {}.", type_url); + return false; + } else if (!rateLimitAllowsDrain()) { + ENVOY_LOG(trace, "{} discovery request hit rate limit; will try later.", type_url); + return false; + } + return true; +} + +// Checks whether we have something to say in a discovery request, which can be an ACK and/or +// a subscription update. (Does not check whether we *can* send that discovery request). +// Returns the type_url we should send the discovery request for (if any). +// First, prioritizes ACKs over non-ACK subscription interest updates. 
+// Then, prioritizes non-ACK updates in the order the various types +// of subscriptions were activated. +template +absl::optional GrpcMuxImpl::whoWantsToSendDiscoveryRequest() { + // All ACKs are sent before plain updates. trySendDiscoveryRequests() relies on this. So, choose + // type_url from pausable_ack_queue_ if possible, before looking at pending updates. + if (!pausable_ack_queue_.empty()) { + return pausable_ack_queue_.front().type_url_; + } + // If we're looking to send multiple non-ACK requests, send them in the order that their + // subscriptions were initiated. + for (const auto& sub_type : subscription_ordering_) { + auto& sub = subscriptionStateFor(sub_type); + if (sub.subscriptionUpdatePending() && !pausable_ack_queue_.paused(sub_type)) { + return sub_type; + } + } + return absl::nullopt; +} + +template class GrpcMuxImpl; +template class GrpcMuxImpl; + +// Delta- and SotW-specific concrete subclasses: +GrpcMuxDelta::GrpcMuxDelta(Grpc::RawAsyncClientPtr&& async_client, Event::Dispatcher& dispatcher, + const Protobuf::MethodDescriptor& service_method, + envoy::config::core::v3::ApiVersion transport_api_version, + Random::RandomGenerator& random, Stats::Scope& scope, + const RateLimitSettings& rate_limit_settings, + const LocalInfo::LocalInfo& local_info, bool skip_subsequent_node) + : GrpcMuxImpl(std::make_unique(dispatcher), skip_subsequent_node, + local_info, transport_api_version, std::move(async_client), dispatcher, + service_method, random, scope, rate_limit_settings) {} + +// GrpcStreamCallbacks for GrpcMuxDelta +void GrpcMuxDelta::requestOnDemandUpdate(const std::string& type_url, + const absl::flat_hash_set& for_update) { + auto& sub = subscriptionStateFor(type_url); + sub.updateSubscriptionInterest(for_update, {}); + // Tell the server about our change in interest, if any. 
+ if (sub.subscriptionUpdatePending()) { + trySendDiscoveryRequests(); + } +} + +GrpcMuxSotw::GrpcMuxSotw(Grpc::RawAsyncClientPtr&& async_client, Event::Dispatcher& dispatcher, + const Protobuf::MethodDescriptor& service_method, + envoy::config::core::v3::ApiVersion transport_api_version, + Random::RandomGenerator& random, Stats::Scope& scope, + const RateLimitSettings& rate_limit_settings, + const LocalInfo::LocalInfo& local_info, bool skip_subsequent_node) + : GrpcMuxImpl(std::make_unique(dispatcher), skip_subsequent_node, + local_info, transport_api_version, std::move(async_client), dispatcher, + service_method, random, scope, rate_limit_settings) {} + +Config::GrpcMuxWatchPtr NullGrpcMuxImpl::addWatch(const std::string&, + const absl::flat_hash_set&, + SubscriptionCallbacks&, OpaqueResourceDecoder&, + const SubscriptionOptions&) { + throw EnvoyException("ADS must be configured to support an ADS config source"); +} + +} // namespace XdsMux +} // namespace Config +} // namespace Envoy diff --git a/source/common/config/xds_mux/grpc_mux_impl.h b/source/common/config/xds_mux/grpc_mux_impl.h new file mode 100644 index 0000000000000..a1ec7f2332dd1 --- /dev/null +++ b/source/common/config/xds_mux/grpc_mux_impl.h @@ -0,0 +1,270 @@ +#pragma once + +#include +#include +#include + +#include "envoy/common/random_generator.h" +#include "envoy/common/time.h" +#include "envoy/common/token_bucket.h" +#include "envoy/config/grpc_mux.h" +#include "envoy/config/subscription.h" +#include "envoy/event/dispatcher.h" +#include "envoy/grpc/status.h" +#include "envoy/service/discovery/v3/discovery.pb.h" +#include "envoy/upstream/cluster_manager.h" + +#include "source/common/common/logger.h" +#include "source/common/common/utility.h" +#include "source/common/config/api_version.h" +#include "source/common/config/grpc_stream.h" +#include "source/common/config/pausable_ack_queue.h" +#include "source/common/config/watch_map.h" +#include 
"source/common/config/xds_mux/delta_subscription_state.h" +#include "source/common/config/xds_mux/sotw_subscription_state.h" +#include "source/common/grpc/common.h" + +#include "absl/container/node_hash_map.h" + +namespace Envoy { +namespace Config { +namespace XdsMux { + +class ShutdownableMux { +public: + virtual ~ShutdownableMux() = default; + virtual void shutdown() PURE; +}; + +// Manages subscriptions to one or more type of resource. The logical protocol +// state of those subscription(s) is handled by SubscriptionState. +// This class owns the GrpcStream used to talk to the server, maintains queuing +// logic to properly order the subscription(s)' various messages, and allows +// starting/stopping/pausing of the subscriptions. +// +// @tparam S SubscriptionState state type, either SotwSubscriptionState or DeltaSubscriptionState +// @tparam F SubscriptionStateFactory type, either SotwSubscriptionStateFactory or +// DeltaSubscriptionStateFactory +// @tparam RQ Xds request type, either envoy::service::discovery::v3::DiscoveryRequest or +// envoy::service::discovery::v3::DeltaDiscoveryRequest +// @tparam RS Xds response type, either envoy::service::discovery::v3::DiscoveryResponse or +// envoy::service::discovery::v3::DeltaDiscoveryResponse +// +template +class GrpcMuxImpl : public GrpcStreamCallbacks, + public GrpcMux, + public ShutdownableMux, + Logger::Loggable { +public: + GrpcMuxImpl(std::unique_ptr subscription_state_factory, bool skip_subsequent_node, + const LocalInfo::LocalInfo& local_info, + envoy::config::core::v3::ApiVersion transport_api_version, + Grpc::RawAsyncClientPtr&& async_client, Event::Dispatcher& dispatcher, + const Protobuf::MethodDescriptor& service_method, Random::RandomGenerator& random, + Stats::Scope& scope, const RateLimitSettings& rate_limit_settings); + + ~GrpcMuxImpl() override; + + // Causes all GrpcMuxImpl objects to stop sending any messages on `grpc_stream_` to fix a crash + // on Envoy shutdown due to dangling pointers. 
This may not be the ideal fix; it is probably + // preferable for the `ServerImpl` to cause all configuration subscriptions to be shutdown, which + // would then cause all `GrpcMuxImpl` to be destructed. + // TODO: figure out the correct fix: https://github.com/envoyproxy/envoy/issues/15072. + static void shutdownAll(); + + void shutdown() override { shutdown_ = true; } + + // TODO (dmitri-d) return a naked pointer instead of the wrapper once the legacy mux has been + // removed and the mux interface can be changed + Config::GrpcMuxWatchPtr addWatch(const std::string& type_url, + const absl::flat_hash_set& resources, + SubscriptionCallbacks& callbacks, + OpaqueResourceDecoder& resource_decoder, + const SubscriptionOptions& options) override; + void updateWatch(const std::string& type_url, Watch* watch, + const absl::flat_hash_set& resources, + const SubscriptionOptions& options); + void removeWatch(const std::string& type_url, Watch* watch); + + ScopedResume pause(const std::string& type_url) override; + ScopedResume pause(const std::vector type_urls) override; + void start() override; + const absl::flat_hash_map>& subscriptions() const { + return subscriptions_; + } + bool isUnified() const override { return true; } + + // GrpcStreamCallbacks + void onStreamEstablished() override { handleEstablishedStream(); } + void onEstablishmentFailure() override { handleStreamEstablishmentFailure(); } + void onWriteable() override { trySendDiscoveryRequests(); } + void onDiscoveryResponse(std::unique_ptr&& message, + ControlPlaneStats& control_plane_stats) override { + genericHandleResponse(message->type_url(), *message, control_plane_stats); + } + + GrpcStream& grpcStreamForTest() { return grpc_stream_; } + +protected: + class WatchImpl : public Envoy::Config::GrpcMuxWatch { + public: + WatchImpl(const std::string& type_url, Watch* watch, GrpcMuxImpl& parent, + const SubscriptionOptions& options) + : type_url_(type_url), watch_(watch), parent_(parent), options_(options) {} + 
+ ~WatchImpl() override { remove(); } + + void remove() { + if (watch_) { + parent_.removeWatch(type_url_, watch_); + watch_ = nullptr; + } + } + + void update(const absl::flat_hash_set& resources) override { + parent_.updateWatch(type_url_, watch_, resources, options_); + } + + private: + const std::string type_url_; + Watch* watch_; + GrpcMuxImpl& parent_; + const SubscriptionOptions options_; + }; + + void sendGrpcMessage(RQ& msg_proto, S& sub_state); + void maybeUpdateQueueSizeStat(uint64_t size) { grpc_stream_.maybeUpdateQueueSizeStat(size); } + bool grpcStreamAvailable() { return grpc_stream_.grpcStreamAvailable(); } + bool rateLimitAllowsDrain() { return grpc_stream_.checkRateLimitAllowsDrain(); } + void sendMessage(RQ& msg_proto) { grpc_stream_.sendMessage(msg_proto); } + + S& subscriptionStateFor(const std::string& type_url); + WatchMap& watchMapFor(const std::string& type_url); + void handleEstablishedStream(); + void handleStreamEstablishmentFailure(); + void genericHandleResponse(const std::string& type_url, const RS& response_proto, + ControlPlaneStats& control_plane_stats); + void trySendDiscoveryRequests(); + bool skipSubsequentNode() const { return skip_subsequent_node_; } + bool anyRequestSentYetInCurrentStream() const { return any_request_sent_yet_in_current_stream_; } + void setAnyRequestSentYetInCurrentStream(bool value) { + any_request_sent_yet_in_current_stream_ = value; + } + const LocalInfo::LocalInfo& localInfo() const { return local_info_; } + const envoy::config::core::v3::ApiVersion& transportApiVersion() const { + return transport_api_version_; + } + +private: + // Checks whether external conditions allow sending a DeltaDiscoveryRequest. (Does not check + // whether we *want* to send a (Delta)DiscoveryRequest). + bool canSendDiscoveryRequest(const std::string& type_url); + + // Checks whether we have something to say in a (Delta)DiscoveryRequest, which can be an ACK + // and/or a subscription update. 
(Does not check whether we *can* send that + // (Delta)DiscoveryRequest). Returns the type_url we should send the DeltaDiscoveryRequest for (if + // any). First, prioritizes ACKs over non-ACK subscription interest updates. Then, prioritizes + // non-ACK updates in the order the various types of subscriptions were activated (as tracked by + // subscription_ordering_). + absl::optional whoWantsToSendDiscoveryRequest(); + + // Invoked when dynamic context parameters change for a resource type. + void onDynamicContextUpdate(absl::string_view resource_type_url); + + GrpcStream grpc_stream_; + + // Resource (N)ACKs we're waiting to send, stored in the order that they should be sent in. All + // of our different resource types' ACKs are mixed together in this queue. See class for + // description of how it interacts with pause() and resume(). + PausableAckQueue pausable_ack_queue_; + + // Makes SubscriptionStates, to be held in the subscriptions_ map. Whether this GrpcMux is doing + // delta or state of the world xDS is determined by which concrete subclass this variable gets. + std::unique_ptr subscription_state_factory_; + + // Map key is type_url. + // Only addWatch() should insert into these maps. + absl::flat_hash_map> subscriptions_; + absl::flat_hash_map> watch_maps_; + + // Determines the order of initial discovery requests. (Assumes that subscriptions are added + // to this GrpcMux in the order of Envoy's dependency ordering). + std::list subscription_ordering_; + + // Whether to enable the optimization of only including the node field in the very first + // discovery request in an xDS gRPC stream (really just one: *not* per-type_url). + const bool skip_subsequent_node_; + + // State to help with skip_subsequent_node's logic. + bool any_request_sent_yet_in_current_stream_{}; + + // Used to populate the (Delta)DiscoveryRequest's node field. 
That field is the same across + // all type_urls, and moreover, the 'skip_subsequent_node' logic needs to operate across all + // the type_urls. So, while the SubscriptionStates populate every other field of these messages, + // this one is up to GrpcMux. + const LocalInfo::LocalInfo& local_info_; + Common::CallbackHandlePtr dynamic_update_callback_handle_; + const envoy::config::core::v3::ApiVersion transport_api_version_; + + // True iff Envoy is shutting down; no messages should be sent on the `grpc_stream_` when this is + // true because it may contain dangling pointers. + std::atomic shutdown_{false}; +}; + +class GrpcMuxDelta : public GrpcMuxImpl { +public: + GrpcMuxDelta(Grpc::RawAsyncClientPtr&& async_client, Event::Dispatcher& dispatcher, + const Protobuf::MethodDescriptor& service_method, + envoy::config::core::v3::ApiVersion transport_api_version, + Random::RandomGenerator& random, Stats::Scope& scope, + const RateLimitSettings& rate_limit_settings, const LocalInfo::LocalInfo& local_info, + bool skip_subsequent_node); + + // GrpcStreamCallbacks + void requestOnDemandUpdate(const std::string& type_url, + const absl::flat_hash_set& for_update) override; +}; + +class GrpcMuxSotw : public GrpcMuxImpl { +public: + GrpcMuxSotw(Grpc::RawAsyncClientPtr&& async_client, Event::Dispatcher& dispatcher, + const Protobuf::MethodDescriptor& service_method, + envoy::config::core::v3::ApiVersion transport_api_version, + Random::RandomGenerator& random, Stats::Scope& scope, + const RateLimitSettings& rate_limit_settings, const LocalInfo::LocalInfo& local_info, + bool skip_subsequent_node); + + // GrpcStreamCallbacks + void requestOnDemandUpdate(const std::string&, const absl::flat_hash_set&) override { + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + } +}; + +class NullGrpcMuxImpl : public GrpcMux { +public: + void start() override {} + + ScopedResume pause(const std::string&) override { + return std::make_unique([]() {}); + } + ScopedResume pause(const std::vector) override { + 
return std::make_unique([]() {}); + } + + Config::GrpcMuxWatchPtr addWatch(const std::string&, const absl::flat_hash_set&, + SubscriptionCallbacks&, OpaqueResourceDecoder&, + const SubscriptionOptions&) override; + + // legacy mux interface not implemented by unified mux. + void requestOnDemandUpdate(const std::string&, const absl::flat_hash_set&) override { + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + } +}; + +} // namespace XdsMux +} // namespace Config +} // namespace Envoy diff --git a/source/common/config/xds_mux/sotw_subscription_state.cc b/source/common/config/xds_mux/sotw_subscription_state.cc index bb7a9f4c3a9c6..90be3a318a2a6 100644 --- a/source/common/config/xds_mux/sotw_subscription_state.cc +++ b/source/common/config/xds_mux/sotw_subscription_state.cc @@ -70,7 +70,7 @@ void SotwSubscriptionState::handleGoodResponse( } // TODO (dmitri-d) to eliminate decoding of resources twice consider expanding the interface to - // support passing of decoded resources + // support passing of decoded resources. This would also avoid a resource copy above. callbacks().onConfigUpdate(non_heartbeat_resources, message.version_info()); // Now that we're passed onConfigUpdate() without an exception thrown, we know we're good. 
last_good_version_info_ = message.version_info(); diff --git a/source/common/config/xds_mux/sotw_subscription_state.h b/source/common/config/xds_mux/sotw_subscription_state.h index 4d191fb93a3c4..86063198f5a7b 100644 --- a/source/common/config/xds_mux/sotw_subscription_state.h +++ b/source/common/config/xds_mux/sotw_subscription_state.h @@ -62,6 +62,21 @@ class SotwSubscriptionState absl::flat_hash_set names_tracked_; }; +class SotwSubscriptionStateFactory : public SubscriptionStateFactory { +public: + SotwSubscriptionStateFactory(Event::Dispatcher& dispatcher) : dispatcher_(dispatcher) {} + ~SotwSubscriptionStateFactory() override = default; + std::unique_ptr + makeSubscriptionState(const std::string& type_url, UntypedConfigUpdateCallbacks& callbacks, + OpaqueResourceDecoder& resource_decoder, const bool) override { + return std::make_unique(type_url, callbacks, dispatcher_, + resource_decoder); + } + +private: + Event::Dispatcher& dispatcher_; +}; + } // namespace XdsMux } // namespace Config } // namespace Envoy diff --git a/source/common/config/xds_mux/subscription_state.h b/source/common/config/xds_mux/subscription_state.h index 6607989745763..9f9b48cd7723a 100644 --- a/source/common/config/xds_mux/subscription_state.h +++ b/source/common/config/xds_mux/subscription_state.h @@ -47,6 +47,9 @@ class BaseSubscriptionState : public SubscriptionState, void clearDynamicContextChanged() { dynamic_context_changed_ = false; } bool dynamicContextChanged() const { return dynamic_context_changed_; } + void setControlPlaneIdentifier(const std::string& id) { control_plane_identifier_ = id; } + std::string& controlPlaneIdentifier() { return control_plane_identifier_; } + // Whether there was a change in our subscription interest we have yet to inform the server of. 
virtual bool subscriptionUpdatePending() const PURE; @@ -111,6 +114,17 @@ class BaseSubscriptionState : public SubscriptionState, UntypedConfigUpdateCallbacks& callbacks_; Event::Dispatcher& dispatcher_; bool dynamic_context_changed_{}; + std::string control_plane_identifier_{}; +}; + +template class SubscriptionStateFactory { +public: + virtual ~SubscriptionStateFactory() = default; + // Note that, outside of tests, we expect callbacks to always be a WatchMap. + virtual std::unique_ptr makeSubscriptionState(const std::string& type_url, + UntypedConfigUpdateCallbacks& callbacks, + OpaqueResourceDecoder& resource_decoder, + const bool wildcard) PURE; }; } // namespace XdsMux diff --git a/source/common/conn_pool/conn_pool_base.cc b/source/common/conn_pool/conn_pool_base.cc index 76950f3ccde71..e91cc10709790 100644 --- a/source/common/conn_pool/conn_pool_base.cc +++ b/source/common/conn_pool/conn_pool_base.cc @@ -171,32 +171,32 @@ void ConnPoolImplBase::attachStreamToClient(Envoy::ConnectionPool::ActiveClient& onPoolFailure(client.real_host_description_, absl::string_view(), ConnectionPool::PoolFailureReason::Overflow, context); host_->cluster().stats().upstream_rq_pending_overflow_.inc(); - } else { - ENVOY_CONN_LOG(debug, "creating stream", client); - - client.remaining_streams_--; - if (client.remaining_streams_ == 0) { - ENVOY_CONN_LOG(debug, "maximum streams per connection, DRAINING", client); - host_->cluster().stats().upstream_cx_max_requests_.inc(); - transitionActiveClientState(client, Envoy::ConnectionPool::ActiveClient::State::DRAINING); - } else if (client.numActiveStreams() + 1 >= client.concurrent_stream_limit_) { - // As soon as the new stream is created, the client will be maxed out. - transitionActiveClientState(client, Envoy::ConnectionPool::ActiveClient::State::BUSY); - } - - // Decrement the capacity, as there's one less stream available for serving. - state_.decrConnectingAndConnectedStreamCapacity(1); - // Track the new active stream. 
- state_.incrActiveStreams(1); - num_active_streams_++; - host_->stats().rq_total_.inc(); - host_->stats().rq_active_.inc(); - host_->cluster().stats().upstream_rq_total_.inc(); - host_->cluster().stats().upstream_rq_active_.inc(); - host_->cluster().resourceManager(priority_).requests().inc(); + return; + } + ENVOY_CONN_LOG(debug, "creating stream", client); - onPoolReady(client, context); + client.remaining_streams_--; + if (client.remaining_streams_ == 0) { + ENVOY_CONN_LOG(debug, "maximum streams per connection, DRAINING", client); + host_->cluster().stats().upstream_cx_max_requests_.inc(); + transitionActiveClientState(client, Envoy::ConnectionPool::ActiveClient::State::DRAINING); + } else if (client.numActiveStreams() + 1 >= client.concurrent_stream_limit_) { + // As soon as the new stream is created, the client will be maxed out. + transitionActiveClientState(client, Envoy::ConnectionPool::ActiveClient::State::BUSY); } + + // Decrement the capacity, as there's one less stream available for serving. + state_.decrConnectingAndConnectedStreamCapacity(1); + // Track the new active stream. 
+ state_.incrActiveStreams(1); + num_active_streams_++; + host_->stats().rq_total_.inc(); + host_->stats().rq_active_.inc(); + host_->cluster().stats().upstream_rq_total_.inc(); + host_->cluster().stats().upstream_rq_active_.inc(); + host_->cluster().resourceManager(priority_).requests().inc(); + + onPoolReady(client, context); } void ConnPoolImplBase::onStreamClosed(Envoy::ConnectionPool::ActiveClient& client, @@ -228,7 +228,7 @@ void ConnPoolImplBase::onStreamClosed(Envoy::ConnectionPool::ActiveClient& clien } } -ConnectionPool::Cancellable* ConnPoolImplBase::newStream(AttachContext& context) { +ConnectionPool::Cancellable* ConnPoolImplBase::newStreamImpl(AttachContext& context) { ASSERT(!deferred_deleting_); ASSERT(static_cast(connecting_stream_capacity_) == @@ -242,42 +242,42 @@ ConnectionPool::Cancellable* ConnPoolImplBase::newStream(AttachContext& context) return nullptr; } - if (host_->cluster().resourceManager(priority_).pendingRequests().canCreate()) { - ConnectionPool::Cancellable* pending = newPendingStream(context); - ENVOY_LOG(debug, "trying to create new connection"); - ENVOY_LOG(trace, fmt::format("{}", *this)); - - auto old_capacity = connecting_stream_capacity_; - // This must come after newPendingStream() because this function uses the - // length of pending_streams_ to determine if a new connection is needed. - const ConnectionResult result = tryCreateNewConnections(); - // If there is not enough connecting capacity, the only reason to not - // increase capacity is if the connection limits are exceeded. - ENVOY_BUG(pending_streams_.size() <= connecting_stream_capacity_ || - connecting_stream_capacity_ > old_capacity || - (result == ConnectionResult::NoConnectionRateLimited || - result == ConnectionResult::FailedToCreateConnection), - fmt::format("Failed to create expected connection: {}", *this)); - if (result == ConnectionResult::FailedToCreateConnection) { - // This currently only happens for HTTP/3 if secrets aren't yet loaded. 
- // Trigger connection failure. - pending->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); - onPoolFailure(nullptr, absl::string_view(), ConnectionPool::PoolFailureReason::Overflow, - context); - return nullptr; - } else { - return pending; - } - } else { + if (!host_->cluster().resourceManager(priority_).pendingRequests().canCreate()) { ENVOY_LOG(debug, "max pending streams overflow"); onPoolFailure(nullptr, absl::string_view(), ConnectionPool::PoolFailureReason::Overflow, context); host_->cluster().stats().upstream_rq_pending_overflow_.inc(); return nullptr; } + + ConnectionPool::Cancellable* pending = newPendingStream(context); + ENVOY_LOG(debug, "trying to create new connection"); + ENVOY_LOG(trace, fmt::format("{}", *this)); + + auto old_capacity = connecting_stream_capacity_; + // This must come after newPendingStream() because this function uses the + // length of pending_streams_ to determine if a new connection is needed. + const ConnectionResult result = tryCreateNewConnections(); + // If there is not enough connecting capacity, the only reason to not + // increase capacity is if the connection limits are exceeded. + ENVOY_BUG(pending_streams_.size() <= connecting_stream_capacity_ || + connecting_stream_capacity_ > old_capacity || + (result == ConnectionResult::NoConnectionRateLimited || + result == ConnectionResult::FailedToCreateConnection), + fmt::format("Failed to create expected connection: {}", *this)); + if (result == ConnectionResult::FailedToCreateConnection) { + // This currently only happens for HTTP/3 if secrets aren't yet loaded. + // Trigger connection failure. 
+ pending->cancel(Envoy::ConnectionPool::CancelPolicy::CloseExcess); + onPoolFailure(nullptr, absl::string_view(), ConnectionPool::PoolFailureReason::Overflow, + context); + return nullptr; + } + + return pending; } -bool ConnPoolImplBase::maybePreconnect(float global_preconnect_ratio) { +bool ConnPoolImplBase::maybePreconnectImpl(float global_preconnect_ratio) { ASSERT(!deferred_deleting_); return tryCreateNewConnection(global_preconnect_ratio) == ConnectionResult::CreatedNewConnection; } @@ -353,6 +353,8 @@ void ConnPoolImplBase::closeIdleConnectionsForDrainingPool() { } for (auto& entry : to_close) { + ENVOY_LOG_EVENT(debug, "closing_idle_client", "closing idle client {} for cluster {}", + entry->id(), host_->cluster().name()); entry->close(); } } @@ -364,6 +366,8 @@ void ConnPoolImplBase::drainConnectionsImpl() { // so all remaining entries in ready_clients_ are serving streams. Move them and all entries // in busy_clients_ to draining. while (!ready_clients_.empty()) { + ENVOY_LOG_EVENT(debug, "draining_ready_client", "draining active client {} for cluster {}", + ready_clients_.front()->id(), host_->cluster().name()); transitionActiveClientState(*ready_clients_.front(), ActiveClient::State::DRAINING); } @@ -371,6 +375,8 @@ void ConnPoolImplBase::drainConnectionsImpl() { // so use a for-loop since the list is not mutated. 
ASSERT(&owningList(ActiveClient::State::DRAINING) == &busy_clients_); for (auto& busy_client : busy_clients_) { + ENVOY_LOG_EVENT(debug, "draining_busy_client", "draining busy client {} for cluster {}", + busy_client->id(), host_->cluster().name()); transitionActiveClientState(*busy_client, ActiveClient::State::DRAINING); } } diff --git a/source/common/conn_pool/conn_pool_base.h b/source/common/conn_pool/conn_pool_base.h index 8e06e8e68ed2b..5ce4d9936feef 100644 --- a/source/common/conn_pool/conn_pool_base.h +++ b/source/common/conn_pool/conn_pool_base.h @@ -162,9 +162,15 @@ class ConnPoolImplBase : protected Logger::Loggable { int64_t connecting_and_connected_capacity, float preconnect_ratio, bool anticipate_incoming_stream = false); + // Envoy::ConnectionPool::Instance implementation helpers void addIdleCallbackImpl(Instance::IdleCb cb); + // Returns true if the pool is idle. + bool isIdleImpl() const; void startDrainImpl(); void drainConnectionsImpl(); + const Upstream::HostConstSharedPtr& host() const { return host_; } + // Called if this pool is likely to be picked soon, to determine if it's worth preconnecting. + bool maybePreconnectImpl(float global_preconnect_ratio); // Closes and destroys all connections. This must be called in the destructor of // derived classes because the derived ActiveClient will downcast parent_ to a more @@ -196,16 +202,11 @@ class ConnPoolImplBase : protected Logger::Loggable { void onConnectionEvent(ActiveClient& client, absl::string_view failure_reason, Network::ConnectionEvent event); - // Returns true if the pool is idle. - bool isIdleImpl() const; - // See if the pool has gone idle. If we're draining, this will also close idle connections. void checkForIdleAndCloseIdleConnsIfDraining(); void scheduleOnUpstreamReady(); - ConnectionPool::Cancellable* newStream(AttachContext& context); - // Called if this pool is likely to be picked soon, to determine if it's worth preconnecting. 
- bool maybePreconnect(float global_preconnect_ratio); + ConnectionPool::Cancellable* newStreamImpl(AttachContext& context); virtual ConnectionPool::Cancellable* newPendingStream(AttachContext& context) PURE; @@ -220,7 +221,6 @@ class ConnPoolImplBase : protected Logger::Loggable { // Called by derived classes any time a stream is completed or destroyed for any reason. void onStreamClosed(Envoy::ConnectionPool::ActiveClient& client, bool delay_attaching_stream); - const Upstream::HostConstSharedPtr& host() const { return host_; } Event::Dispatcher& dispatcher() { return dispatcher_; } Upstream::ResourcePriority priority() const { return priority_; } const Network::ConnectionSocket::OptionsSharedPtr& socketOptions() { return socket_options_; } diff --git a/source/common/event/BUILD b/source/common/event/BUILD index f847b9dd614fa..11ba35646e9af 100644 --- a/source/common/event/BUILD +++ b/source/common/event/BUILD @@ -47,6 +47,7 @@ envoy_cc_library( "//source/common/network:dns_lib", "//source/common/network:connection_lib", "//source/common/network:listener_lib", + "@envoy_api//envoy/config/overload/v3:pkg_cc_proto", ] + select({ "//bazel:apple": ["//source/common/network:apple_dns_lib"], "//conditions:default": [], diff --git a/source/common/event/dispatcher_impl.cc b/source/common/event/dispatcher_impl.cc index c9c48cb31a657..a7a30838aee9c 100644 --- a/source/common/event/dispatcher_impl.cc +++ b/source/common/event/dispatcher_impl.cc @@ -8,6 +8,7 @@ #include "envoy/api/api.h" #include "envoy/common/scope_tracker.h" +#include "envoy/config/overload/v3/overload.pb.h" #include "envoy/network/listen_socket.h" #include "envoy/network/listener.h" @@ -61,7 +62,8 @@ DispatcherImpl::DispatcherImpl(const std::string& name, Api::Api& api, : name_(name), api_(api), buffer_factory_(watermark_factory != nullptr ? 
watermark_factory - : std::make_shared()), + : std::make_shared( + api.bootstrap().overload_manager().buffer_factory_config())), scheduler_(time_system.createScheduler(base_scheduler_, base_scheduler_)), thread_local_delete_cb_( base_scheduler_.createSchedulableCallback([this]() -> void { runThreadLocalDelete(); })), diff --git a/source/common/filesystem/file_shared_impl.h b/source/common/filesystem/file_shared_impl.h index fd81d03ec794e..de2f90aaeb598 100644 --- a/source/common/filesystem/file_shared_impl.h +++ b/source/common/filesystem/file_shared_impl.h @@ -17,6 +17,7 @@ class IoFileError : public Api::IoError { Api::IoError::IoErrorCode getErrorCode() const override; std::string getErrorDetails() const override; + int getSystemErrorCode() const override { return errno_; } private: const int errno_; diff --git a/source/common/filter/http/BUILD b/source/common/filter/BUILD similarity index 81% rename from source/common/filter/http/BUILD rename to source/common/filter/BUILD index 683467b9530ff..458f107ee9aa0 100644 --- a/source/common/filter/http/BUILD +++ b/source/common/filter/BUILD @@ -9,12 +9,12 @@ licenses(["notice"]) # Apache 2 envoy_package() envoy_cc_library( - name = "filter_config_discovery_lib", - srcs = ["filter_config_discovery_impl.cc"], - hdrs = ["filter_config_discovery_impl.h"], + name = "config_discovery_lib", + srcs = ["config_discovery_impl.cc"], + hdrs = ["config_discovery_impl.h"], deps = [ "//envoy/config:subscription_interface", - "//envoy/filter/http:filter_config_provider_interface", + "//envoy/filter:config_provider_manager_interface", "//envoy/singleton:instance_interface", "//envoy/stats:stats_macros", "//envoy/thread_local:thread_local_interface", diff --git a/source/common/filter/http/filter_config_discovery_impl.cc b/source/common/filter/config_discovery_impl.cc similarity index 67% rename from source/common/filter/http/filter_config_discovery_impl.cc rename to source/common/filter/config_discovery_impl.cc index 
33c47f94eec93..74acc6b8383ef 100644 --- a/source/common/filter/http/filter_config_discovery_impl.cc +++ b/source/common/filter/config_discovery_impl.cc @@ -1,4 +1,4 @@ -#include "source/common/filter/http/filter_config_discovery_impl.h" +#include "source/common/filter/config_discovery_impl.h" #include "envoy/config/core/v3/extension.pb.h" #include "envoy/config/core/v3/extension.pb.validate.h" @@ -16,7 +16,6 @@ namespace Envoy { namespace Filter { -namespace Http { namespace { void validateTypeUrlHelper(const std::string& type_url, @@ -29,91 +28,49 @@ void validateTypeUrlHelper(const std::string& type_url, } // namespace -DynamicFilterConfigProviderImpl::DynamicFilterConfigProviderImpl( +DynamicFilterConfigProviderImplBase::DynamicFilterConfigProviderImplBase( FilterConfigSubscriptionSharedPtr& subscription, - const absl::flat_hash_set& require_type_urls, - - Server::Configuration::FactoryContext& factory_context, - Envoy::Http::FilterFactoryCb default_config, bool last_filter_in_filter_chain, + const absl::flat_hash_set& require_type_urls, bool last_filter_in_filter_chain, const std::string& filter_chain_type) : subscription_(subscription), require_type_urls_(require_type_urls), - default_configuration_(default_config ? absl::make_optional(default_config) : absl::nullopt), - tls_(factory_context.threadLocal()), init_target_("DynamicFilterConfigProviderImpl", - [this]() { - subscription_->start(); - // This init target is used to activate - // the subscription but not wait for a - // response. It is used whenever a default - // config is provided to be used while - // waiting for a response. - init_target_.ready(); - }), + init_target_("DynamicFilterConfigProviderImpl", + [this]() { + subscription_->start(); + // This init target is used to activate the subscription but not wait for a + // response. It is used whenever a default config is provided to be used while + // waiting for a response. 
+ init_target_.ready(); + }), last_filter_in_filter_chain_(last_filter_in_filter_chain), filter_chain_type_(filter_chain_type) { subscription_->filter_config_providers_.insert(this); - tls_.set([](Event::Dispatcher&) { return std::make_shared(); }); } -DynamicFilterConfigProviderImpl::~DynamicFilterConfigProviderImpl() { +DynamicFilterConfigProviderImplBase::~DynamicFilterConfigProviderImplBase() { subscription_->filter_config_providers_.erase(this); } -void DynamicFilterConfigProviderImpl::validateTypeUrl(const std::string& type_url) const { +void DynamicFilterConfigProviderImplBase::validateTypeUrl(const std::string& type_url) const { validateTypeUrlHelper(type_url, require_type_urls_); } -const std::string& DynamicFilterConfigProviderImpl::name() { return subscription_->name(); } - -absl::optional DynamicFilterConfigProviderImpl::config() { - return tls_->config_; -} - -void DynamicFilterConfigProviderImpl::onConfigUpdate(Envoy::Http::FilterFactoryCb config, - const std::string&, - Config::ConfigAppliedCb cb) { - tls_.runOnAllThreads( - [config, cb](OptRef tls) { - tls->config_ = config; - if (cb) { - cb(); - } - }, - [this, config]() { - // This happens after all workers have discarded the previous config so it can be safely - // deleted on the main thread by an update with the new config. 
- this->current_config_ = config; - }); -} +const std::string& DynamicFilterConfigProviderImplBase::name() { return subscription_->name(); } -void DynamicFilterConfigProviderImpl::validateTerminalFilter(const std::string& name, - const std::string& filter_type, - bool is_terminal_filter) { +void DynamicFilterConfigProviderImplBase::validateTerminalFilter(const std::string& name, + const std::string& filter_type, + bool is_terminal_filter) { Config::Utility::validateTerminalFilters(name, filter_type, filter_chain_type_, is_terminal_filter, last_filter_in_filter_chain_); } -void DynamicFilterConfigProviderImpl::onConfigRemoved( - Config::ConfigAppliedCb applied_on_all_threads) { - tls_.runOnAllThreads( - [config = default_configuration_](OptRef tls) { tls->config_ = config; }, - [this, applied_on_all_threads]() { - // This happens after all workers have discarded the previous config so it can be safely - // deleted on the main thread by an update with the new config. - this->current_config_ = default_configuration_; - if (applied_on_all_threads) { - applied_on_all_threads(); - } - }); -} - FilterConfigSubscription::FilterConfigSubscription( const envoy::config::core::v3::ConfigSource& config_source, const std::string& filter_config_name, Server::Configuration::FactoryContext& factory_context, - const std::string& stat_prefix, FilterConfigProviderManagerImpl& filter_config_provider_manager, + const std::string& stat_prefix, + FilterConfigProviderManagerImplBase& filter_config_provider_manager, const std::string& subscription_id) : Config::SubscriptionBase( - envoy::config::core::v3::ApiVersion::V3, factory_context.messageValidationContext().dynamicValidationVisitor(), "name"), filter_config_name_(filter_config_name), factory_context_(factory_context), validator_(factory_context.messageValidationContext().dynamicValidationVisitor()), @@ -179,9 +136,9 @@ void FilterConfigSubscription::onConfigUpdate( factory.createFilterFactoryFromProto(*message, stat_prefix_, 
factory_context_); ENVOY_LOG(debug, "Updating filter config {}", filter_config_name_); - Common::applyToAllWithCleanup( + Common::applyToAllWithCleanup( filter_config_providers_, - [&factory_callback, &version_info](DynamicFilterConfigProviderImpl* provider, + [&factory_callback, &version_info](DynamicFilterConfigProviderImplBase* provider, std::shared_ptr cleanup) { provider->onConfigUpdate(factory_callback, version_info, [cleanup] {}); }, @@ -200,9 +157,9 @@ void FilterConfigSubscription::onConfigUpdate( if (!removed_resources.empty()) { ASSERT(removed_resources.size() == 1); ENVOY_LOG(debug, "Removing filter config {}", filter_config_name_); - Common::applyToAllWithCleanup( + Common::applyToAllWithCleanup( filter_config_providers_, - [](DynamicFilterConfigProviderImpl* provider, std::shared_ptr cleanup) { + [](DynamicFilterConfigProviderImplBase* provider, std::shared_ptr cleanup) { provider->onConfigRemoved([cleanup] {}); }, [this]() { stats_.config_reload_.inc(); }); @@ -234,7 +191,7 @@ FilterConfigSubscription::~FilterConfigSubscription() { void FilterConfigSubscription::incrementConflictCounter() { stats_.config_conflict_.inc(); } -std::shared_ptr FilterConfigProviderManagerImpl::getSubscription( +std::shared_ptr FilterConfigProviderManagerImplBase::getSubscription( const envoy::config::core::v3::ConfigSource& config_source, const std::string& name, Server::Configuration::FactoryContext& factory_context, const std::string& stat_prefix) { // FilterConfigSubscriptions are unique based on their config source and filter config name @@ -256,10 +213,43 @@ std::shared_ptr FilterConfigProviderManagerImpl::getSu } } +void FilterConfigProviderManagerImplBase::applyLastOrDefaultConfig( + std::shared_ptr& subscription, + DynamicFilterConfigProviderImplBase& provider, const std::string& filter_config_name) { + // If the subscription already received a config, attempt to apply it. 
+ // It is possible that the received extension config fails to satisfy the listener + // type URL constraints. This may happen if ECDS and LDS updates are racing, and the LDS + // update arrives first. In this case, use the default config, increment a metric, + // and the applied config eventually converges once ECDS update arrives. + bool last_config_valid = false; + if (subscription->lastConfig().has_value()) { + TRY_ASSERT_MAIN_THREAD { + provider.validateTypeUrl(subscription->lastTypeUrl()); + provider.validateTerminalFilter(filter_config_name, subscription->lastFilterName(), + subscription->isLastFilterTerminal()); + last_config_valid = true; + } + END_TRY catch (const EnvoyException& e) { + ENVOY_LOG(debug, "ECDS subscription {} is invalid in a listener context: {}.", + filter_config_name, e.what()); + subscription->incrementConflictCounter(); + } + if (last_config_valid) { + provider.onConfigUpdate(subscription->lastConfig().value(), subscription->lastVersionInfo(), + nullptr); + } + } + + // Apply the default config if none has been applied. 
+ if (!last_config_valid) { + provider.applyDefaultConfiguration(); + } +} + DynamicFilterConfigProviderPtr FilterConfigProviderManagerImpl::createDynamicFilterConfigProvider( const envoy::config::core::v3::ExtensionConfigSource& config_source, const std::string& filter_config_name, Server::Configuration::FactoryContext& factory_context, - const std::string& stat_prefix, bool last_filter_in_filter_config, + const std::string& stat_prefix, bool last_filter_in_filter_chain, const std::string& filter_chain_type) { auto subscription = getSubscription(config_source.config_source(), filter_config_name, factory_context, stat_prefix); @@ -277,68 +267,45 @@ DynamicFilterConfigProviderPtr FilterConfigProviderManagerImpl::createDynamicFil Envoy::Http::FilterFactoryCb default_config = nullptr; if (config_source.has_default_config()) { - auto* default_factory = - Config::Utility::getFactoryByType( - config_source.default_config()); - if (default_factory == nullptr) { - throw EnvoyException(fmt::format("Error: cannot find filter factory {} for default filter " - "configuration with type URL {}.", - filter_config_name, - config_source.default_config().type_url())); - } - validateTypeUrlHelper(Config::Utility::getFactoryType(config_source.default_config()), - require_type_urls); - ProtobufTypes::MessagePtr message = Config::Utility::translateAnyToFactoryConfig( - config_source.default_config(), factory_context.messageValidationVisitor(), - *default_factory); - Config::Utility::validateTerminalFilters( - filter_config_name, default_factory->name(), filter_chain_type, - default_factory->isTerminalFilterByProto(*message, factory_context), - last_filter_in_filter_config); - default_config = - default_factory->createFilterFactoryFromProto(*message, stat_prefix, factory_context); + default_config = getDefaultConfig(config_source.default_config(), filter_config_name, + factory_context, stat_prefix, last_filter_in_filter_chain, + filter_chain_type, require_type_urls); } auto provider = 
std::make_unique( - subscription, require_type_urls, factory_context, default_config, - last_filter_in_filter_config, filter_chain_type); + subscription, require_type_urls, factory_context, default_config, last_filter_in_filter_chain, + filter_chain_type); // Ensure the subscription starts if it has not already. if (config_source.apply_default_config_without_warming()) { - factory_context.initManager().add(provider->init_target_); - } - - // If the subscription already received a config, attempt to apply it. - // It is possible that the received extension config fails to satisfy the listener - // type URL constraints. This may happen if ECDS and LDS updates are racing, and the LDS - // update arrives first. In this case, use the default config, increment a metric, - // and the applied config eventually converges once ECDS update arrives. - bool last_config_valid = false; - if (subscription->lastConfig().has_value()) { - TRY_ASSERT_MAIN_THREAD { - provider->validateTypeUrl(subscription->lastTypeUrl()); - provider->validateTerminalFilter(filter_config_name, subscription->lastFilterName(), - subscription->isLastFilterTerminal()); - last_config_valid = true; - } - END_TRY catch (const EnvoyException& e) { - ENVOY_LOG(debug, "ECDS subscription {} is invalid in a listener context: {}.", - filter_config_name, e.what()); - subscription->incrementConflictCounter(); - } - if (last_config_valid) { - provider->onConfigUpdate(subscription->lastConfig().value(), subscription->lastVersionInfo(), - nullptr); - } + factory_context.initManager().add(provider->initTarget()); } + applyLastOrDefaultConfig(subscription, *provider, filter_config_name); + return provider; +} - // Apply the default config if none has been applied. 
- if (!last_config_valid) { - provider->applyDefaultConfiguration(); +Http::FilterFactoryCb HttpFilterConfigProviderManagerImpl::getDefaultConfig( + const ProtobufWkt::Any& proto_config, const std::string& filter_config_name, + Server::Configuration::FactoryContext& factory_context, const std::string& stat_prefix, + bool last_filter_in_filter_chain, const std::string& filter_chain_type, + const absl::flat_hash_set require_type_urls) const { + auto* default_factory = + Config::Utility::getFactoryByType( + proto_config); + if (default_factory == nullptr) { + throw EnvoyException(fmt::format("Error: cannot find filter factory {} for default filter " + "configuration with type URL {}.", + filter_config_name, proto_config.type_url())); } - return provider; + validateTypeUrlHelper(Config::Utility::getFactoryType(proto_config), require_type_urls); + ProtobufTypes::MessagePtr message = Config::Utility::translateAnyToFactoryConfig( + proto_config, factory_context.messageValidationVisitor(), *default_factory); + Config::Utility::validateTerminalFilters( + filter_config_name, default_factory->name(), filter_chain_type, + default_factory->isTerminalFilterByProto(*message, factory_context), + last_filter_in_filter_chain); + return default_factory->createFilterFactoryFromProto(*message, stat_prefix, factory_context); } -} // namespace Http } // namespace Filter } // namespace Envoy diff --git a/source/common/filter/http/filter_config_discovery_impl.h b/source/common/filter/config_discovery_impl.h similarity index 65% rename from source/common/filter/http/filter_config_discovery_impl.h rename to source/common/filter/config_discovery_impl.h index a207eca3099d0..7646f42176995 100644 --- a/source/common/filter/http/filter_config_discovery_impl.h +++ b/source/common/filter/config_discovery_impl.h @@ -4,7 +4,7 @@ #include "envoy/config/core/v3/extension.pb.validate.h" #include "envoy/config/extension_config_provider.h" #include "envoy/config/subscription.h" -#include 
"envoy/filter/http/filter_config_provider.h" +#include "envoy/filter/config_provider_manager.h" #include "envoy/http/filter.h" #include "envoy/protobuf/message_validator.h" #include "envoy/server/factory_context.h" @@ -22,38 +22,98 @@ namespace Envoy { namespace Filter { -namespace Http { -class FilterConfigProviderManagerImpl; +class FilterConfigProviderManagerImplBase; class FilterConfigSubscription; using FilterConfigSubscriptionSharedPtr = std::shared_ptr; +/** + * Base class for a filter config provider using discovery subscriptions. + **/ +class DynamicFilterConfigProviderImplBase + : public Config::DynamicExtensionConfigProviderBase { +public: + DynamicFilterConfigProviderImplBase(FilterConfigSubscriptionSharedPtr& subscription, + const absl::flat_hash_set& require_type_urls, + bool last_filter_in_filter_chain, + const std::string& filter_chain_type); + + ~DynamicFilterConfigProviderImplBase() override; + const Init::Target& initTarget() const { return init_target_; } + + void validateTypeUrl(const std::string& type_url) const; + void validateTerminalFilter(const std::string& name, const std::string& filter_type, + bool is_terminal_filter); + + const std::string& name(); + +private: + FilterConfigSubscriptionSharedPtr subscription_; + const absl::flat_hash_set require_type_urls_; + + // Local initialization target to ensure that the subscription starts in + // case no warming is requested by any other filter config provider. + Init::TargetImpl init_target_; + + const bool last_filter_in_filter_chain_; + const std::string filter_chain_type_; +}; + /** * Implementation of a filter config provider using discovery subscriptions. 
**/ -class DynamicFilterConfigProviderImpl : public DynamicFilterConfigProvider { +class DynamicFilterConfigProviderImpl : public DynamicFilterConfigProviderImplBase, + public DynamicFilterConfigProvider { public: DynamicFilterConfigProviderImpl(FilterConfigSubscriptionSharedPtr& subscription, const absl::flat_hash_set& require_type_urls, Server::Configuration::FactoryContext& factory_context, Envoy::Http::FilterFactoryCb default_config, bool last_filter_in_filter_chain, - const std::string& filter_chain_type); - - ~DynamicFilterConfigProviderImpl() override; + const std::string& filter_chain_type) + : DynamicFilterConfigProviderImplBase(subscription, require_type_urls, + last_filter_in_filter_chain, filter_chain_type), + default_configuration_(default_config ? absl::make_optional(default_config) + : absl::nullopt), + tls_(factory_context.threadLocal()) { + tls_.set([](Event::Dispatcher&) { return std::make_shared(); }); + }; - void validateTypeUrl(const std::string& type_url) const; - void validateTerminalFilter(const std::string& name, const std::string& filter_type, - bool is_terminal_filter); // Config::ExtensionConfigProvider - const std::string& name() override; - absl::optional config() override; + const std::string& name() override { return DynamicFilterConfigProviderImplBase::name(); } + absl::optional config() override { return tls_->config_; } // Config::DynamicExtensionConfigProvider void onConfigUpdate(Envoy::Http::FilterFactoryCb config, const std::string&, - Config::ConfigAppliedCb cb) override; - void onConfigRemoved(Config::ConfigAppliedCb cb) override; + Config::ConfigAppliedCb cb) override { + tls_.runOnAllThreads( + [config, cb](OptRef tls) { + tls->config_ = config; + if (cb) { + cb(); + } + }, + [this, config]() { + // This happens after all workers have discarded the previous config so it can be safely + // deleted on the main thread by an update with the new config. 
+ this->current_config_ = config; + }); + } + + void onConfigRemoved(Config::ConfigAppliedCb applied_on_all_threads) override { + tls_.runOnAllThreads( + [config = default_configuration_](OptRef tls) { tls->config_ = config; }, + [this, applied_on_all_threads]() { + // This happens after all workers have discarded the previous config so it can be safely + // deleted on the main thread by an update with the new config. + this->current_config_ = default_configuration_; + if (applied_on_all_threads) { + applied_on_all_threads(); + } + }); + } + void applyDefaultConfiguration() override { if (default_configuration_) { onConfigUpdate(*default_configuration_, "", nullptr); @@ -66,21 +126,11 @@ class DynamicFilterConfigProviderImpl : public DynamicFilterConfigProvider { absl::optional config_{}; }; - FilterConfigSubscriptionSharedPtr subscription_; - const absl::flat_hash_set require_type_urls_; // Currently applied configuration to ensure that the main thread deletes the last reference to // it. absl::optional current_config_{absl::nullopt}; const absl::optional default_configuration_; ThreadLocal::TypedSlot tls_; - - // Local initialization target to ensure that the subscription starts in - // case no warming is requested by any other filter config provider. 
- Init::TargetImpl init_target_; - - const bool last_filter_in_filter_chain_; - const std::string filter_chain_type_; - friend class FilterConfigProviderManagerImpl; }; /** @@ -111,7 +161,7 @@ class FilterConfigSubscription const std::string& filter_config_name, Server::Configuration::FactoryContext& factory_context, const std::string& stat_prefix, - FilterConfigProviderManagerImpl& filter_config_provider_manager, + FilterConfigProviderManagerImplBase& filter_config_provider_manager, const std::string& subscription_id); ~FilterConfigSubscription() override; @@ -154,11 +204,11 @@ class FilterConfigSubscription const std::string stat_prefix_; ExtensionConfigDiscoveryStats stats_; - // FilterConfigProviderManagerImpl maintains active subscriptions in a map. - FilterConfigProviderManagerImpl& filter_config_provider_manager_; + // FilterConfigProviderManagerImplBase maintains active subscriptions in a map. + FilterConfigProviderManagerImplBase& filter_config_provider_manager_; const std::string subscription_id_; - absl::flat_hash_set filter_config_providers_; - friend class DynamicFilterConfigProviderImpl; + absl::flat_hash_set filter_config_providers_; + friend class DynamicFilterConfigProviderImplBase; // This must be the last since its destructor may call out to stats to report // on draining requests. @@ -183,12 +233,30 @@ class StaticFilterConfigProviderImpl : public FilterConfigProvider { const std::string filter_config_name_; }; +/** + * Base class for a FilterConfigProviderManager. 
+ */ +class FilterConfigProviderManagerImplBase : Logger::Loggable { +protected: + std::shared_ptr + getSubscription(const envoy::config::core::v3::ConfigSource& config_source, + const std::string& name, Server::Configuration::FactoryContext& factory_context, + const std::string& stat_prefix); + void applyLastOrDefaultConfig(std::shared_ptr& subscription, + DynamicFilterConfigProviderImplBase& provider, + const std::string& filter_config_name); + +private: + absl::flat_hash_map> subscriptions_; + friend class FilterConfigSubscription; +}; + /** * An implementation of FilterConfigProviderManager. */ -class FilterConfigProviderManagerImpl : public FilterConfigProviderManager, - public Singleton::Instance, - Logger::Loggable { +class FilterConfigProviderManagerImpl : public FilterConfigProviderManagerImplBase, + public FilterConfigProviderManager, + public Singleton::Instance { public: DynamicFilterConfigProviderPtr createDynamicFilterConfigProvider( const envoy::config::core::v3::ExtensionConfigSource& config_source, @@ -202,15 +270,24 @@ class FilterConfigProviderManagerImpl : public FilterConfigProviderManager, return std::make_unique(config, filter_config_name); } -private: - std::shared_ptr - getSubscription(const envoy::config::core::v3::ConfigSource& config_source, - const std::string& name, Server::Configuration::FactoryContext& factory_context, - const std::string& stat_prefix); - absl::flat_hash_map> subscriptions_; - friend class FilterConfigSubscription; +protected: + virtual Http::FilterFactoryCb + getDefaultConfig(const ProtobufWkt::Any& proto_config, const std::string& filter_config_name, + Server::Configuration::FactoryContext& factory_context, + const std::string& stat_prefix, bool last_filter_in_filter_chain, + const std::string& filter_chain_type, + const absl::flat_hash_set require_type_urls) const PURE; +}; + +class HttpFilterConfigProviderManagerImpl : public FilterConfigProviderManagerImpl { +protected: + Http::FilterFactoryCb + 
getDefaultConfig(const ProtobufWkt::Any& proto_config, const std::string& filter_config_name, + Server::Configuration::FactoryContext& factory_context, + const std::string& stat_prefix, bool last_filter_in_filter_chain, + const std::string& filter_chain_type, + const absl::flat_hash_set require_type_urls) const override; }; -} // namespace Http } // namespace Filter } // namespace Envoy diff --git a/source/common/formatter/substitution_formatter.cc b/source/common/formatter/substitution_formatter.cc index a50fef64617a8..cc11f6cf0d3e8 100644 --- a/source/common/formatter/substitution_formatter.cc +++ b/source/common/formatter/substitution_formatter.cc @@ -655,11 +655,11 @@ class StreamInfoSslConnectionInfoFieldExtractor : public StreamInfoFormatter::Fi StreamInfoSslConnectionInfoFieldExtractor(FieldExtractor f) : field_extractor_(f) {} absl::optional extract(const StreamInfo::StreamInfo& stream_info) const override { - if (stream_info.downstreamSslConnection() == nullptr) { + if (stream_info.downstreamAddressProvider().sslConnection() == nullptr) { return absl::nullopt; } - const auto value = field_extractor_(*stream_info.downstreamSslConnection()); + const auto value = field_extractor_(*stream_info.downstreamAddressProvider().sslConnection()); if (value && value->empty()) { return absl::nullopt; } @@ -668,11 +668,11 @@ class StreamInfoSslConnectionInfoFieldExtractor : public StreamInfoFormatter::Fi } ProtobufWkt::Value extractValue(const StreamInfo::StreamInfo& stream_info) const override { - if (stream_info.downstreamSslConnection() == nullptr) { + if (stream_info.downstreamAddressProvider().sslConnection() == nullptr) { return unspecifiedValue(); } - const auto value = field_extractor_(*stream_info.downstreamSslConnection()); + const auto value = field_extractor_(*stream_info.downstreamAddressProvider().sslConnection()); if (value && value->empty()) { return unspecifiedValue(); } @@ -1136,8 +1136,10 @@ GrpcStatusFormatter::formatValue(const 
Http::RequestHeaderMap&, MetadataFormatter::MetadataFormatter(const std::string& filter_namespace, const std::vector& path, - absl::optional max_length) - : filter_namespace_(filter_namespace), path_(path), max_length_(max_length) {} + absl::optional max_length, + MetadataFormatter::GetMetadataFunction get_func) + : filter_namespace_(filter_namespace), path_(path), max_length_(max_length), + get_func_(get_func) {} absl::optional MetadataFormatter::formatMetadata(const envoy::config::core::v3::Metadata& metadata) const { @@ -1177,54 +1179,46 @@ MetadataFormatter::formatMetadataValue(const envoy::config::core::v3::Metadata& return val; } +absl::optional MetadataFormatter::format(const Http::RequestHeaderMap&, + const Http::ResponseHeaderMap&, + const Http::ResponseTrailerMap&, + const StreamInfo::StreamInfo& stream_info, + absl::string_view) const { + auto metadata = get_func_(stream_info); + return (metadata != nullptr) ? formatMetadata(*metadata) : absl::nullopt; +} + +ProtobufWkt::Value MetadataFormatter::formatValue(const Http::RequestHeaderMap&, + const Http::ResponseHeaderMap&, + const Http::ResponseTrailerMap&, + const StreamInfo::StreamInfo& stream_info, + absl::string_view) const { + auto metadata = get_func_(stream_info); + return formatMetadataValue((metadata != nullptr) ? *metadata + : envoy::config::core::v3::Metadata()); +} // TODO(glicht): Consider adding support for route/listener/cluster metadata as suggested by // @htuch. 
See: https://github.com/envoyproxy/envoy/issues/3006 DynamicMetadataFormatter::DynamicMetadataFormatter(const std::string& filter_namespace, const std::vector& path, absl::optional max_length) - : MetadataFormatter(filter_namespace, path, max_length) {} - -absl::optional DynamicMetadataFormatter::format( - const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo& stream_info, absl::string_view) const { - return MetadataFormatter::formatMetadata(stream_info.dynamicMetadata()); -} - -ProtobufWkt::Value DynamicMetadataFormatter::formatValue(const Http::RequestHeaderMap&, - const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo& stream_info, - absl::string_view) const { - return MetadataFormatter::formatMetadataValue(stream_info.dynamicMetadata()); -} + : MetadataFormatter(filter_namespace, path, max_length, + [](const StreamInfo::StreamInfo& stream_info) { + return &stream_info.dynamicMetadata(); + }) {} ClusterMetadataFormatter::ClusterMetadataFormatter(const std::string& filter_namespace, const std::vector& path, absl::optional max_length) - : MetadataFormatter(filter_namespace, path, max_length) {} - -absl::optional ClusterMetadataFormatter::format( - const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo& stream_info, absl::string_view) const { - auto cluster_info = stream_info.upstreamClusterInfo(); - if (!cluster_info.has_value() || cluster_info.value() == nullptr) { - return absl::nullopt; - } - return MetadataFormatter::formatMetadata(cluster_info.value()->metadata()); -} - -ProtobufWkt::Value ClusterMetadataFormatter::formatValue(const Http::RequestHeaderMap&, - const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, - const StreamInfo::StreamInfo& stream_info, - absl::string_view) const { - auto cluster_info = stream_info.upstreamClusterInfo(); - if 
(!cluster_info.has_value() || cluster_info.value() == nullptr) { - // Let the formatter do its thing with empty metadata. - return MetadataFormatter::formatMetadataValue(envoy::config::core::v3::Metadata()); - } - return MetadataFormatter::formatMetadataValue(cluster_info.value()->metadata()); -} + : MetadataFormatter(filter_namespace, path, max_length, + [](const StreamInfo::StreamInfo& stream_info) + -> const envoy::config::core::v3::Metadata* { + auto cluster_info = stream_info.upstreamClusterInfo(); + if (!cluster_info.has_value() || cluster_info.value() == nullptr) { + return nullptr; + } + return &cluster_info.value()->metadata(); + }) {} FilterStateFormatter::FilterStateFormatter(const std::string& key, absl::optional max_length, @@ -1335,7 +1329,8 @@ DownstreamPeerCertVStartFormatter::DownstreamPeerCertVStartFormatter(const std:: parseFormat(token, sizeof("DOWNSTREAM_PEER_CERT_V_START(") - 1), std::make_unique( [](const StreamInfo::StreamInfo& stream_info) -> absl::optional { - const auto connection_info = stream_info.downstreamSslConnection(); + const auto connection_info = + stream_info.downstreamAddressProvider().sslConnection(); return connection_info != nullptr ? connection_info->validFromPeerCertificate() : absl::optional(); })) {} @@ -1347,7 +1342,8 @@ DownstreamPeerCertVEndFormatter::DownstreamPeerCertVEndFormatter(const std::stri parseFormat(token, sizeof("DOWNSTREAM_PEER_CERT_V_END(") - 1), std::make_unique( [](const StreamInfo::StreamInfo& stream_info) -> absl::optional { - const auto connection_info = stream_info.downstreamSslConnection(); + const auto connection_info = + stream_info.downstreamAddressProvider().sslConnection(); return connection_info != nullptr ? 
connection_info->expirationPeerCertificate() : absl::optional(); })) {} diff --git a/source/common/formatter/substitution_formatter.h b/source/common/formatter/substitution_formatter.h index ebe4752d1a56c..239700da17236 100644 --- a/source/common/formatter/substitution_formatter.h +++ b/source/common/formatter/substitution_formatter.h @@ -447,10 +447,22 @@ class StreamInfoFormatter : public FormatterProvider { /** * Base formatter for formatting Metadata objects */ -class MetadataFormatter { +class MetadataFormatter : public FormatterProvider { public: + using GetMetadataFunction = + std::function; MetadataFormatter(const std::string& filter_namespace, const std::vector& path, - absl::optional max_length); + absl::optional max_length, GetMetadataFunction get); + + absl::optional format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, + const Http::ResponseTrailerMap&, + const StreamInfo::StreamInfo& stream_info, + absl::string_view) const override; + + ProtobufWkt::Value formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, + const Http::ResponseTrailerMap&, + const StreamInfo::StreamInfo& stream_info, + absl::string_view) const override; protected: absl::optional @@ -461,40 +473,25 @@ class MetadataFormatter { std::string filter_namespace_; std::vector path_; absl::optional max_length_; + GetMetadataFunction get_func_; }; /** * FormatterProvider for DynamicMetadata from StreamInfo. 
*/ -class DynamicMetadataFormatter : public FormatterProvider, MetadataFormatter { +class DynamicMetadataFormatter : public MetadataFormatter { public: DynamicMetadataFormatter(const std::string& filter_namespace, const std::vector& path, absl::optional max_length); - - // FormatterProvider - absl::optional format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, - absl::string_view) const override; - ProtobufWkt::Value formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, - absl::string_view) const override; }; /** * FormatterProvider for ClusterMetadata from StreamInfo. */ -class ClusterMetadataFormatter : public FormatterProvider, MetadataFormatter { +class ClusterMetadataFormatter : public MetadataFormatter { public: ClusterMetadataFormatter(const std::string& filter_namespace, const std::vector& path, absl::optional max_length); - - // FormatterProvider - absl::optional format(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, - absl::string_view) const override; - ProtobufWkt::Value formatValue(const Http::RequestHeaderMap&, const Http::ResponseHeaderMap&, - const Http::ResponseTrailerMap&, const StreamInfo::StreamInfo&, - absl::string_view) const override; }; /** diff --git a/source/common/grpc/BUILD b/source/common/grpc/BUILD index 0681c33674b59..8e3cc89fd7e77 100644 --- a/source/common/grpc/BUILD +++ b/source/common/grpc/BUILD @@ -33,7 +33,6 @@ envoy_cc_library( ":typed_async_client_lib", "//envoy/grpc:async_client_interface", "//source/common/buffer:zero_copy_input_stream_lib", - "//source/common/config:version_converter_lib", "//source/common/http:async_client_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], diff --git a/source/common/grpc/async_client_impl.cc b/source/common/grpc/async_client_impl.cc 
index 53aeacf6236e6..dd63a6bac88f6 100644 --- a/source/common/grpc/async_client_impl.cc +++ b/source/common/grpc/async_client_impl.cc @@ -21,6 +21,7 @@ AsyncClientImpl::AsyncClientImpl(Upstream::ClusterManager& cm, Router::HeaderParser::configure(config.initial_metadata(), /*append=*/false)) {} AsyncClientImpl::~AsyncClientImpl() { + ASSERT(isThreadSafe()); while (!active_streams_.empty()) { active_streams_.front()->resetStream(); } @@ -31,6 +32,7 @@ AsyncRequest* AsyncClientImpl::sendRaw(absl::string_view service_full_name, RawAsyncRequestCallbacks& callbacks, Tracing::Span& parent_span, const Http::AsyncClient::RequestOptions& options) { + ASSERT(isThreadSafe()); auto* const async_request = new AsyncRequestImpl( *this, service_full_name, method_name, std::move(request), callbacks, parent_span, options); AsyncStreamImplPtr grpc_stream{async_request}; @@ -48,6 +50,7 @@ RawAsyncStream* AsyncClientImpl::startRaw(absl::string_view service_full_name, absl::string_view method_name, RawAsyncStreamCallbacks& callbacks, const Http::AsyncClient::StreamOptions& options) { + ASSERT(isThreadSafe()); auto grpc_stream = std::make_unique(*this, service_full_name, method_name, callbacks, options); diff --git a/source/common/grpc/codec.cc b/source/common/grpc/codec.cc index 3c8233d2644c8..ca4808a5852ff 100644 --- a/source/common/grpc/codec.cc +++ b/source/common/grpc/codec.cc @@ -23,9 +23,13 @@ void Encoder::newFrame(uint8_t flags, uint64_t length, std::array& o } void Encoder::prependFrameHeader(uint8_t flags, Buffer::Instance& buffer) { + prependFrameHeader(flags, buffer, buffer.length()); +} + +void Encoder::prependFrameHeader(uint8_t flags, Buffer::Instance& buffer, uint32_t message_length) { // Compute the size of the payload and construct the length prefix. 
std::array frame; - Grpc::Encoder().newFrame(flags, buffer.length(), frame); + Grpc::Encoder().newFrame(flags, message_length, frame); Buffer::OwnedImpl frame_buffer(frame.data(), frame.size()); buffer.prepend(frame_buffer); } diff --git a/source/common/grpc/codec.h b/source/common/grpc/codec.h index 7577cac5135b0..d886851ae49ea 100644 --- a/source/common/grpc/codec.h +++ b/source/common/grpc/codec.h @@ -35,8 +35,15 @@ class Encoder { // Prepend the gRPC frame into the buffer. // @param flags supplies the GRPC data frame flags. - // @param buffer the buffer with the message payload. + // @param buffer the full buffer with the message payload. void prependFrameHeader(uint8_t flags, Buffer::Instance& buffer); + + // Prepend the gRPC frame into the buffer. + // @param flags supplies the GRPC data frame flags. + // @param buffer the buffer with the first part of the message payload. + // @param message_length the total length of the message, which may be longer + // than buffer. + void prependFrameHeader(uint8_t flags, Buffer::Instance& buffer, uint32_t message_length); }; // Wire format (http://www.grpc.io/docs/guides/wire.html) of GRPC data frame diff --git a/source/common/grpc/google_async_client_impl.cc b/source/common/grpc/google_async_client_impl.cc index 56380d58dcb13..8e4539536cdc3 100644 --- a/source/common/grpc/google_async_client_impl.cc +++ b/source/common/grpc/google_async_client_impl.cc @@ -108,6 +108,7 @@ GoogleAsyncClientImpl::GoogleAsyncClientImpl(Event::Dispatcher& dispatcher, } GoogleAsyncClientImpl::~GoogleAsyncClientImpl() { + ASSERT(isThreadSafe()); ENVOY_LOG(debug, "Client teardown, resetting streams"); while (!active_streams_.empty()) { active_streams_.front()->resetStream(); @@ -120,6 +121,7 @@ AsyncRequest* GoogleAsyncClientImpl::sendRaw(absl::string_view service_full_name RawAsyncRequestCallbacks& callbacks, Tracing::Span& parent_span, const Http::AsyncClient::RequestOptions& options) { + ASSERT(isThreadSafe()); auto* const async_request = 
new GoogleAsyncRequestImpl( *this, service_full_name, method_name, std::move(request), callbacks, parent_span, options); GoogleAsyncStreamImplPtr grpc_stream{async_request}; @@ -137,6 +139,7 @@ RawAsyncStream* GoogleAsyncClientImpl::startRaw(absl::string_view service_full_n absl::string_view method_name, RawAsyncStreamCallbacks& callbacks, const Http::AsyncClient::StreamOptions& options) { + ASSERT(isThreadSafe()); auto grpc_stream = std::make_unique(*this, service_full_name, method_name, callbacks, options); diff --git a/source/common/grpc/typed_async_client.h b/source/common/grpc/typed_async_client.h index 22a009054357c..af9e8df9492aa 100644 --- a/source/common/grpc/typed_async_client.h +++ b/source/common/grpc/typed_async_client.h @@ -6,7 +6,6 @@ #include "envoy/grpc/async_client.h" #include "source/common/common/empty_string.h" -#include "source/common/config/version_converter.h" namespace Envoy { namespace Grpc { @@ -40,12 +39,6 @@ template class AsyncStream /* : public RawAsyncStream */ { void sendMessage(const Protobuf::Message& request, bool end_stream) { Internal::sendMessageUntyped(stream_, std::move(request), end_stream); } - void sendMessage(const Protobuf::Message& request, - envoy::config::core::v3::ApiVersion transport_api_version, bool end_stream) { - Config::VersionConverter::prepareMessageForGrpcWire(const_cast(request), - transport_api_version); - Internal::sendMessageUntyped(stream_, std::move(request), end_stream); - } void closeStream() { stream_->closeStream(); } void resetStream() { stream_->resetStream(); } bool isAboveWriteBufferHighWatermark() const { @@ -86,55 +79,6 @@ template class AsyncRequestCallbacks : public RawAsyncReques } }; -/** - * Versioned methods wrapper. 
- */ -class VersionedMethods { -public: - VersionedMethods(const std::string& v3, const std::string& v2, const std::string& v2_alpha = "") - : v3_(Protobuf::DescriptorPool::generated_pool()->FindMethodByName(v3)), - v2_(Protobuf::DescriptorPool::generated_pool()->FindMethodByName(v2)), - v2_alpha_(v2_alpha.empty() - ? nullptr - : Protobuf::DescriptorPool::generated_pool()->FindMethodByName(v2_alpha)) {} - - /** - * Given a version, return the method descriptor for a specific version. - * - * @param api_version target API version. - * @param use_alpha if this is an alpha version of an API method. - * - * @return Protobuf::MethodDescriptor& of a method for a specific version. - */ - const Protobuf::MethodDescriptor& - getMethodDescriptorForVersion(envoy::config::core::v3::ApiVersion api_version, - bool use_alpha = false) const { - switch (api_version) { - case envoy::config::core::v3::ApiVersion::AUTO: - FALLTHRU; - case envoy::config::core::v3::ApiVersion::V2: { - const auto* descriptor = use_alpha ? v2_alpha_ : v2_; - ASSERT(descriptor != nullptr); - return *descriptor; - } - - case envoy::config::core::v3::ApiVersion::V3: { - const auto* descriptor = v3_; - ASSERT(descriptor != nullptr); - return *descriptor; - } - - default: - NOT_REACHED_GCOVR_EXCL_LINE; - } - } - -private: - const Protobuf::MethodDescriptor* v3_{nullptr}; - const Protobuf::MethodDescriptor* v2_{nullptr}; - const Protobuf::MethodDescriptor* v2_alpha_{nullptr}; -}; - /** * Convenience subclasses for AsyncStreamCallbacks. 
*/ @@ -170,16 +114,6 @@ template class AsyncClient /* : public Raw return Internal::sendUntyped(client_.get(), service_method, request, callbacks, parent_span, options); } - virtual AsyncRequest* send(const Protobuf::MethodDescriptor& service_method, - const Protobuf::Message& request, - AsyncRequestCallbacks& callbacks, Tracing::Span& parent_span, - const Http::AsyncClient::RequestOptions& options, - envoy::config::core::v3::ApiVersion transport_api_version) { - Config::VersionConverter::prepareMessageForGrpcWire(const_cast(request), - transport_api_version); - return Internal::sendUntyped(client_.get(), service_method, request, callbacks, parent_span, - options); - } virtual AsyncStream start(const Protobuf::MethodDescriptor& service_method, AsyncStreamCallbacks& callbacks, diff --git a/source/common/http/async_client_impl.cc b/source/common/http/async_client_impl.cc index f1638e928d71b..f913103750301 100644 --- a/source/common/http/async_client_impl.cc +++ b/source/common/http/async_client_impl.cc @@ -25,9 +25,6 @@ const AsyncStreamImpl::NullVirtualHost AsyncStreamImpl::RouteEntryImpl::virtual_ const AsyncStreamImpl::NullRateLimitPolicy AsyncStreamImpl::NullVirtualHost::rate_limit_policy_; const AsyncStreamImpl::NullConfig AsyncStreamImpl::NullVirtualHost::route_configuration_; const std::multimap AsyncStreamImpl::RouteEntryImpl::opaque_config_; -const envoy::config::core::v3::Metadata AsyncStreamImpl::RouteEntryImpl::metadata_; -const Config::TypedMetadataImpl - AsyncStreamImpl::RouteEntryImpl::typed_metadata_({}); const AsyncStreamImpl::NullPathMatchCriterion AsyncStreamImpl::RouteEntryImpl::path_match_criterion_; const absl::optional diff --git a/source/common/http/async_client_impl.h b/source/common/http/async_client_impl.h index 5f45c023b3075..0d0273c696b14 100644 --- a/source/common/http/async_client_impl.h +++ b/source/common/http/async_client_impl.h @@ -192,9 +192,6 @@ class AsyncStreamImpl : public AsyncClient::Stream, const Router::RateLimitPolicy& 
rateLimitPolicy() const override { return rate_limit_policy_; } const Router::CorsPolicy* corsPolicy() const override { return nullptr; } const Router::Config& routeConfig() const override { return route_configuration_; } - const Router::RouteSpecificFilterConfig* perFilterConfig(const std::string&) const override { - return nullptr; - } bool includeAttemptCountInRequest() const override { return false; } bool includeAttemptCountInResponse() const override { return false; } uint32_t retryShadowBufferLimit() const override { @@ -299,15 +296,10 @@ class AsyncStreamImpl : public AsyncClient::Stream, const Router::VirtualHost& virtualHost() const override { return virtual_host_; } bool autoHostRewrite() const override { return false; } bool includeVirtualHostRateLimits() const override { return true; } - const envoy::config::core::v3::Metadata& metadata() const override { return metadata_; } - const Config::TypedMetadata& typedMetadata() const override { return typed_metadata_; } const Router::PathMatchCriterion& pathMatchCriterion() const override { return path_match_criterion_; } - const Router::RouteSpecificFilterConfig* perFilterConfig(const std::string&) const override { - return nullptr; - } const absl::optional& connectConfig() const override { return connect_config_nullopt_; } @@ -325,9 +317,6 @@ class AsyncStreamImpl : public AsyncClient::Stream, static const std::vector shadow_policies_; static const NullVirtualHost virtual_host_; static const std::multimap opaque_config_; - static const envoy::config::core::v3::Metadata metadata_; - // Async client doesn't require metadata. 
- static const Config::TypedMetadataImpl typed_metadata_; static const NullPathMatchCriterion path_match_criterion_; Router::RouteEntry::UpgradeMap upgrade_map_; @@ -343,18 +332,26 @@ class AsyncStreamImpl : public AsyncClient::Stream, const Protobuf::RepeatedPtrField& hash_policy, const absl::optional& retry_policy) - : route_entry_(cluster_name, timeout, hash_policy, retry_policy) {} + : route_entry_(cluster_name, timeout, hash_policy, retry_policy), typed_metadata_({}) {} // Router::Route const Router::DirectResponseEntry* directResponseEntry() const override { return nullptr; } const Router::RouteEntry* routeEntry() const override { return &route_entry_; } const Router::Decorator* decorator() const override { return nullptr; } const Router::RouteTracing* tracingConfig() const override { return nullptr; } - const Router::RouteSpecificFilterConfig* perFilterConfig(const std::string&) const override { + const Router::RouteSpecificFilterConfig* + mostSpecificPerFilterConfig(const std::string&) const override { return nullptr; } + void traversePerFilterConfig( + const std::string&, + std::function) const override {} + const envoy::config::core::v3::Metadata& metadata() const override { return metadata_; } + const Envoy::Config::TypedMetadata& typedMetadata() const override { return typed_metadata_; } RouteEntryImpl route_entry_; + const envoy::config::core::v3::Metadata metadata_; + const Envoy::Config::TypedMetadataImpl typed_metadata_; }; void cleanup(); diff --git a/source/common/http/codec_client.cc b/source/common/http/codec_client.cc index 899dde6f9eff0..29aa601384a44 100644 --- a/source/common/http/codec_client.cc +++ b/source/common/http/codec_client.cc @@ -86,7 +86,6 @@ RequestEncoder& CodecClient::newStream(ResponseDecoder& response_decoder) { void CodecClient::onEvent(Network::ConnectionEvent event) { if (event == Network::ConnectionEvent::Connected) { ENVOY_CONN_LOG(debug, "connected", *connection_); - 
connection_->streamInfo().setDownstreamSslConnection(connection_->ssl()); connected_ = true; } diff --git a/source/common/http/codec_helper.h b/source/common/http/codec_helper.h index 4a304c159e56c..b0689be22562e 100644 --- a/source/common/http/codec_helper.h +++ b/source/common/http/codec_helper.h @@ -1,5 +1,7 @@ #pragma once +#include "envoy/event/dispatcher.h" +#include "envoy/event/timer.h" #include "envoy/http/codec.h" #include "source/common/common/assert.h" @@ -82,5 +84,55 @@ class StreamCallbackHelper { uint32_t high_watermark_callbacks_{}; }; +// A base class shared between Http2 codec and Http3 codec to set a timeout for locally ended stream +// with buffered data. +class MultiplexedStreamImplBase : public Stream, public StreamCallbackHelper { +public: + MultiplexedStreamImplBase(Event::Dispatcher& dispatcher) : dispatcher_(dispatcher) {} + ~MultiplexedStreamImplBase() override { ASSERT(stream_idle_timer_ == nullptr); } + // TODO(mattklein123): Optimally this would be done in the destructor but there are currently + // deferred delete lifetime issues that need sorting out if the destructor of the stream is + // going to be able to refer to the parent connection. + virtual void destroy() { disarmStreamIdleTimer(); } + + void onLocalEndStream() { + ASSERT(local_end_stream_); + if (hasPendingData()) { + createPendingFlushTimer(); + } + } + + void disarmStreamIdleTimer() { + if (stream_idle_timer_ != nullptr) { + // To ease testing and the destructor assertion. 
+ stream_idle_timer_->disableTimer(); + stream_idle_timer_.reset(); + } + } + +protected: + void setFlushTimeout(std::chrono::milliseconds timeout) override { + stream_idle_timeout_ = timeout; + } + + void createPendingFlushTimer() { + ASSERT(stream_idle_timer_ == nullptr); + if (stream_idle_timeout_.count() > 0) { + stream_idle_timer_ = dispatcher_.createTimer([this] { onPendingFlushTimer(); }); + stream_idle_timer_->enableTimer(stream_idle_timeout_); + } + } + + virtual void onPendingFlushTimer() { stream_idle_timer_.reset(); } + + virtual bool hasPendingData() PURE; + +private: + Event::Dispatcher& dispatcher_; + // See HttpConnectionManager.stream_idle_timeout. + std::chrono::milliseconds stream_idle_timeout_{}; + Event::TimerPtr stream_idle_timer_; +}; + } // namespace Http } // namespace Envoy diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 8050bb884077f..90f6be301b634 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -129,8 +129,8 @@ void ConnectionManagerImpl::initializeReadFilterCallbacks(Network::ReadFilterCal read_callbacks_->connection().streamInfo().filterState()->setData( Network::ProxyProtocolFilterState::key(), std::make_unique(Network::ProxyProtocolData{ - read_callbacks_->connection().addressProvider().remoteAddress(), - read_callbacks_->connection().addressProvider().localAddress()}), + read_callbacks_->connection().connectionInfoProvider().remoteAddress(), + read_callbacks_->connection().connectionInfoProvider().localAddress()}), StreamInfo::FilterState::StateType::ReadOnly, StreamInfo::FilterState::LifeSpan::Connection); } @@ -273,16 +273,14 @@ RequestDecoder& ConnectionManagerImpl::newStream(ResponseEncoder& response_encod ENVOY_CONN_LOG(debug, "new stream", read_callbacks_->connection()); - // Set the account to start accounting if enabled. 
This is still a - // work-in-progress, and will be removed when other features using the - // accounting are implemented. - Buffer::BufferMemoryAccountSharedPtr downstream_request_account; - if (Runtime::runtimeFeatureEnabled("envoy.test_only.per_stream_buffer_accounting")) { - downstream_request_account = std::make_shared(); - response_encoder.getStream().setAccount(downstream_request_account); - } + // Create account, wiring the stream to use it for tracking bytes. + // If tracking is disabled, the wiring becomes a NOP. + auto& buffer_factory = read_callbacks_->connection().dispatcher().getWatermarkFactory(); + Buffer::BufferMemoryAccountSharedPtr downstream_stream_account = + buffer_factory.createAccount(response_encoder.getStream()); + response_encoder.getStream().setAccount(downstream_stream_account); ActiveStreamPtr new_stream(new ActiveStream(*this, response_encoder.getStream().bufferLimit(), - std::move(downstream_request_account))); + std::move(downstream_stream_account))); accumulated_requests_++; if (config_.maxRequestsPerConnection() > 0 && @@ -662,9 +660,6 @@ ConnectionManagerImpl::ActiveStream::ActiveStream(ConnectionManagerImpl& connect connection_manager_.stats_.named_.downstream_rq_http1_total_.inc(); } - filter_manager_.streamInfo().setDownstreamSslConnection( - connection_manager_.read_callbacks_->connection().ssl()); - if (connection_manager_.config_.streamIdleTimeout().count()) { idle_timeout_ms_ = connection_manager_.config_.streamIdleTimeout(); stream_idle_timer_ = @@ -822,7 +817,7 @@ const Network::Connection* ConnectionManagerImpl::ActiveStream::connection() { } uint32_t ConnectionManagerImpl::ActiveStream::localPort() { - auto ip = connection()->addressProvider().localAddress()->ip(); + auto ip = connection()->connectionInfoProvider().localAddress()->ip(); if (ip == nullptr) { return 0; } @@ -1190,10 +1185,11 @@ void ConnectionManagerImpl::ActiveStream::snapScopedRouteConfig() { void 
ConnectionManagerImpl::ActiveStream::refreshCachedRoute() { refreshCachedRoute(nullptr); } void ConnectionManagerImpl::ActiveStream::refreshDurationTimeout() { - if (!filter_manager_.streamInfo().route_entry_ || !request_headers_) { + if (!filter_manager_.streamInfo().route() || + !filter_manager_.streamInfo().route()->routeEntry() || !request_headers_) { return; } - auto& route = filter_manager_.streamInfo().route_entry_; + const auto& route = filter_manager_.streamInfo().route()->routeEntry(); auto grpc_timeout = Grpc::Common::getGrpcTimeout(*request_headers_); std::chrono::milliseconds timeout; @@ -1516,6 +1512,7 @@ void ConnectionManagerImpl::ActiveStream::onResetStream(StreamResetReason reset_ // 1) We TX an app level reset // 2) The codec TX a codec level reset // 3) The codec RX a reset + // 4) The overload manager reset the stream // If we need to differentiate we need to do it inside the codec. Can start with this. ENVOY_STREAM_LOG(debug, "stream reset", *this); connection_manager_.stats_.named_.downstream_rq_rx_reset_.inc(); @@ -1530,6 +1527,14 @@ void ConnectionManagerImpl::ActiveStream::onResetStream(StreamResetReason reset_ filter_manager_.streamInfo().setResponseCodeDetails(encoder_details); } + // Check if we're in the overload manager reset case. + // encoder_details should be empty in this case as we don't have a codec error. + if (encoder_details.empty() && reset_reason == StreamResetReason::OverloadManager) { + filter_manager_.streamInfo().setResponseFlag(StreamInfo::ResponseFlag::OverloadManager); + filter_manager_.streamInfo().setResponseCodeDetails( + StreamInfo::ResponseCodeDetails::get().Overload); + } + connection_manager_.doDeferredStreamDestroy(*this); } @@ -1608,14 +1613,15 @@ ConnectionManagerImpl::ActiveStream::route(const Router::RouteCallback& cb) { * functions as a helper to refreshCachedRoute(const Router::RouteCallback& cb). 
*/ void ConnectionManagerImpl::ActiveStream::setRoute(Router::RouteConstSharedPtr route) { - filter_manager_.streamInfo().route_entry_ = route ? route->routeEntry() : nullptr; + filter_manager_.streamInfo().route_ = route; cached_route_ = std::move(route); - if (nullptr == filter_manager_.streamInfo().route_entry_) { + if (nullptr == filter_manager_.streamInfo().route() || + nullptr == filter_manager_.streamInfo().route()->routeEntry()) { cached_cluster_info_ = nullptr; } else { Upstream::ThreadLocalCluster* local_cluster = connection_manager_.cluster_manager_.getThreadLocalCluster( - filter_manager_.streamInfo().route_entry_->clusterName()); + filter_manager_.streamInfo().route()->routeEntry()->clusterName()); cached_cluster_info_ = (nullptr == local_cluster) ? nullptr : local_cluster->info(); } diff --git a/source/common/http/conn_manager_utility.cc b/source/common/http/conn_manager_utility.cc index 2171e9d3f6b12..41df7a465e8cc 100644 --- a/source/common/http/conn_manager_utility.cc +++ b/source/common/http/conn_manager_utility.cc @@ -100,6 +100,18 @@ ConnectionManagerUtility::MutateRequestHeadersResult ConnectionManagerUtility::m request_headers.removeProxyConnection(); request_headers.removeTransferEncoding(); + // Sanitize referer field if exists. + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.sanitize_http_header_referer")) { + auto result = request_headers.get(Http::CustomHeaders::get().Referer); + if (!result.empty()) { + Utility::Url url; + if (result.size() > 1 || !url.initialize(result[0]->value().getStringView(), false)) { + // A request header shouldn't have multiple referer field. + request_headers.remove(Http::CustomHeaders::get().Referer); + } + } + } + // If we are "using remote address" this means that we create/append to XFF with our immediate // peer. Cases where we don't "use remote address" include trusted double proxy where we expect // our peer to have already properly set XFF, etc. 
@@ -120,13 +132,14 @@ ConnectionManagerUtility::MutateRequestHeadersResult ConnectionManagerUtility::m // are but they didn't populate XFF properly, the trusted client address is the // source address of the immediate downstream's connection to us. if (final_remote_address == nullptr) { - final_remote_address = connection.addressProvider().remoteAddress(); + final_remote_address = connection.connectionInfoProvider().remoteAddress(); } if (!config.skipXffAppend()) { - if (Network::Utility::isLoopbackAddress(*connection.addressProvider().remoteAddress())) { + if (Network::Utility::isLoopbackAddress( + *connection.connectionInfoProvider().remoteAddress())) { Utility::appendXff(request_headers, config.localAddress()); } else { - Utility::appendXff(request_headers, *connection.addressProvider().remoteAddress()); + Utility::appendXff(request_headers, *connection.connectionInfoProvider().remoteAddress()); } } // If the prior hop is not a trusted proxy, overwrite any x-forwarded-proto value it set as @@ -143,7 +156,7 @@ ConnectionManagerUtility::MutateRequestHeadersResult ConnectionManagerUtility::m // If we find one, it will be used as the downstream address for logging. It may or may not be // used for determining internal/external status (see below). OriginalIPDetectionParams params = {request_headers, - connection.addressProvider().remoteAddress()}; + connection.connectionInfoProvider().remoteAddress()}; for (const auto& detection_extension : config.originalIpDetectionExtensions()) { const auto result = detection_extension->detect(params); @@ -196,7 +209,7 @@ ConnectionManagerUtility::MutateRequestHeadersResult ConnectionManagerUtility::m // After determining internal request status, if there is no final remote address, due to no XFF, // busted XFF, etc., use the direct connection remote address for logging. 
if (final_remote_address == nullptr) { - final_remote_address = connection.addressProvider().remoteAddress(); + final_remote_address = connection.connectionInfoProvider().remoteAddress(); } // Edge request is the request from external clients to front Envoy. @@ -474,6 +487,20 @@ ConnectionManagerUtility::maybeNormalizePath(RequestHeaderMap& request_headers, return NormalizePathAction::Continue; // It's as valid as it is going to get. } + auto fragment_pos = request_headers.getPathValue().find('#'); + if (fragment_pos != absl::string_view::npos) { + if (Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.http_reject_path_with_fragment")) { + return NormalizePathAction::Reject; + } + // Check runtime override and throw away fragment from URI path + // TODO(yanavlasov): remove this override after deprecation period. + if (Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.http_strip_fragment_from_path_unsafe_if_disabled")) { + request_headers.setPath(request_headers.getPathValue().substr(0, fragment_pos)); + } + } + NormalizePathAction final_action = NormalizePathAction::Continue; const auto escaped_slashes_action = config.pathWithEscapedSlashesAction(); ASSERT(escaped_slashes_action != envoy::extensions::filters::network::http_connection_manager:: diff --git a/source/common/http/conn_pool_base.cc b/source/common/http/conn_pool_base.cc index 4a67bea9da859..8bc119a38f871 100644 --- a/source/common/http/conn_pool_base.cc +++ b/source/common/http/conn_pool_base.cc @@ -60,7 +60,7 @@ ConnectionPool::Cancellable* HttpConnPoolImplBase::newStream(Http::ResponseDecoder& response_decoder, Http::ConnectionPool::Callbacks& callbacks) { HttpAttachContext context({&response_decoder, &callbacks}); - return Envoy::ConnectionPool::ConnPoolImplBase::newStream(context); + return newStreamImpl(context); } bool HttpConnPoolImplBase::hasActiveConnections() const { diff --git a/source/common/http/conn_pool_base.h b/source/common/http/conn_pool_base.h index 
e2f7494c0e38b..e14a6ffa2ae3a 100644 --- a/source/common/http/conn_pool_base.h +++ b/source/common/http/conn_pool_base.h @@ -66,9 +66,7 @@ class HttpConnPoolImplBase : public Envoy::ConnectionPool::ConnPoolImplBase, Upstream::HostDescriptionConstSharedPtr host() const override { return host_; } ConnectionPool::Cancellable* newStream(Http::ResponseDecoder& response_decoder, Http::ConnectionPool::Callbacks& callbacks) override; - bool maybePreconnect(float ratio) override { - return Envoy::ConnectionPool::ConnPoolImplBase::maybePreconnect(ratio); - } + bool maybePreconnect(float ratio) override { return maybePreconnectImpl(ratio); } bool hasActiveConnections() const override; // Creates a new PendingStream and enqueues it into the queue. diff --git a/source/common/http/filter_manager.cc b/source/common/http/filter_manager.cc index 811807a9561d5..3369ed06c06af 100644 --- a/source/common/http/filter_manager.cc +++ b/source/common/http/filter_manager.cc @@ -531,6 +531,12 @@ void FilterManager::decodeHeaders(ActiveStreamDecoderFilter* filter, RequestHead state_.filter_call_state_ |= FilterCallState::DecodeHeaders; (*entry)->end_stream_ = (end_stream && continue_data_entry == decoder_filters_.end()); FilterHeadersStatus status = (*entry)->decodeHeaders(headers, (*entry)->end_stream_); + if (state_.decoder_filter_chain_aborted_) { + ENVOY_STREAM_LOG(trace, + "decodeHeaders filter iteration aborted due to local reply: filter={}", + *this, static_cast((*entry).get())); + status = FilterHeadersStatus::StopIteration; + } ASSERT(!(status == FilterHeadersStatus::ContinueAndDontEndStream && !(*entry)->end_stream_), "Filters should not return FilterHeadersStatus::ContinueAndDontEndStream from " @@ -674,6 +680,11 @@ void FilterManager::decodeData(ActiveStreamDecoderFilter* filter, Buffer::Instan } ENVOY_STREAM_LOG(trace, "decode data called: filter={} status={}", *this, static_cast((*entry).get()), static_cast(status)); + if (state_.decoder_filter_chain_aborted_) { + 
ENVOY_STREAM_LOG(trace, "decodeData filter iteration aborted due to local reply: filter={}", + *this, static_cast((*entry).get())); + return; + } processNewlyAddedMetadata(); @@ -764,6 +775,12 @@ void FilterManager::decodeTrailers(ActiveStreamDecoderFilter* filter, RequestTra state_.filter_call_state_ &= ~FilterCallState::DecodeTrailers; ENVOY_STREAM_LOG(trace, "decode trailers called: filter={} status={}", *this, static_cast((*entry).get()), static_cast(status)); + if (state_.decoder_filter_chain_aborted_) { + ENVOY_STREAM_LOG(trace, + "decodeTrailers filter iteration aborted due to local reply: filter={}", + *this, static_cast((*entry).get())); + status = FilterTrailersStatus::StopIteration; + } processNewlyAddedMetadata(); @@ -866,6 +883,17 @@ void FilterManager::sendLocalReply( const bool is_head_request = state_.is_head_request_; const bool is_grpc_request = state_.is_grpc_request_; + // Stop filter chain iteration if local reply was sent while filter decoding or encoding callbacks + // are running. 
+ if (state_.filter_call_state_ & (FilterCallState::DecodeHeaders | FilterCallState::DecodeData | + FilterCallState::DecodeTrailers)) { + state_.decoder_filter_chain_aborted_ = true; + } else if (state_.filter_call_state_ & + (FilterCallState::EncodeHeaders | FilterCallState::EncodeData | + FilterCallState::EncodeTrailers)) { + state_.encoder_filter_chain_aborted_ = true; + } + stream_info_.setResponseCodeDetails(details); StreamFilterBase::LocalReplyData data{code, details, false}; @@ -917,8 +945,8 @@ void FilterManager::sendLocalReplyViaFilterChain( state_.destroyed_, Utility::EncodeFunctions{ [this, modify_headers](ResponseHeaderMap& headers) -> void { - if (streamInfo().route_entry_) { - streamInfo().route_entry_->finalizeResponseHeaders(headers, streamInfo()); + if (streamInfo().route() && streamInfo().route()->routeEntry()) { + streamInfo().route()->routeEntry()->finalizeResponseHeaders(headers, streamInfo()); } if (modify_headers) { modify_headers(headers); @@ -956,8 +984,8 @@ void FilterManager::sendDirectLocalReply( state_.destroyed_, Utility::EncodeFunctions{ [this, modify_headers](ResponseHeaderMap& headers) -> void { - if (streamInfo().route_entry_) { - streamInfo().route_entry_->finalizeResponseHeaders(headers, streamInfo()); + if (streamInfo().route() && streamInfo().route()->routeEntry()) { + streamInfo().route()->routeEntry()->finalizeResponseHeaders(headers, streamInfo()); } if (modify_headers) { modify_headers(headers); @@ -1058,6 +1086,12 @@ void FilterManager::encodeHeaders(ActiveStreamEncoderFilter* filter, ResponseHea state_.filter_call_state_ |= FilterCallState::EncodeHeaders; (*entry)->end_stream_ = (end_stream && continue_data_entry == encoder_filters_.end()); FilterHeadersStatus status = (*entry)->handle_->encodeHeaders(headers, (*entry)->end_stream_); + if (state_.encoder_filter_chain_aborted_) { + ENVOY_STREAM_LOG(trace, + "encodeHeaders filter iteration aborted due to local reply: filter={}", + *this, static_cast((*entry).get())); + 
status = FilterHeadersStatus::StopIteration; + } ASSERT(!(status == FilterHeadersStatus::ContinueAndDontEndStream && !(*entry)->end_stream_), "Filters should not return FilterHeadersStatus::ContinueAndDontEndStream from " @@ -1220,6 +1254,11 @@ void FilterManager::encodeData(ActiveStreamEncoderFilter* filter, Buffer::Instan (*entry)->end_stream_ = end_stream && !filter_manager_callbacks_.responseTrailers(); FilterDataStatus status = (*entry)->handle_->encodeData(data, (*entry)->end_stream_); + if (state_.encoder_filter_chain_aborted_) { + ENVOY_STREAM_LOG(trace, "encodeData filter iteration aborted due to local reply: filter={}", + *this, static_cast((*entry).get())); + status = FilterDataStatus::StopIterationNoBuffer; + } if ((*entry)->end_stream_) { (*entry)->handle_->encodeComplete(); } diff --git a/source/common/http/filter_manager.h b/source/common/http/filter_manager.h index 7c16f0edd1e24..5f42ef6ee642e 100644 --- a/source/common/http/filter_manager.h +++ b/source/common/http/filter_manager.h @@ -584,12 +584,12 @@ class FilterManagerCallbacks { /** * This class allows the remote address to be overridden for HTTP stream info. This is used for - * XFF handling. This is required to avoid providing stream info with a non-const address provider. - * Private inheritance from SocketAddressProvider is used to make sure users get the address - * provider via the normal getter. + * XFF handling. This is required to avoid providing stream info with a non-const connection info + * provider. Private inheritance from ConnectionInfoProvider is used to make sure users get the + * address provider via the normal getter. 
*/ -class OverridableRemoteSocketAddressSetterStreamInfo : public StreamInfo::StreamInfoImpl, - private Network::SocketAddressProvider { +class OverridableRemoteConnectionInfoSetterStreamInfo : public StreamInfo::StreamInfoImpl, + private Network::ConnectionInfoProvider { public: using StreamInfoImpl::StreamInfoImpl; @@ -603,9 +603,11 @@ class OverridableRemoteSocketAddressSetterStreamInfo : public StreamInfo::Stream } // StreamInfo::StreamInfo - const Network::SocketAddressProvider& downstreamAddressProvider() const override { return *this; } + const Network::ConnectionInfoProvider& downstreamAddressProvider() const override { + return *this; + } - // Network::SocketAddressProvider + // Network::ConnectionInfoProvider const Network::Address::InstanceConstSharedPtr& localAddress() const override { return StreamInfoImpl::downstreamAddressProvider().localAddress(); } @@ -626,11 +628,17 @@ class OverridableRemoteSocketAddressSetterStreamInfo : public StreamInfo::Stream absl::optional connectionID() const override { return StreamInfoImpl::downstreamAddressProvider().connectionID(); } + Ssl::ConnectionInfoConstSharedPtr sslConnection() const override { + return StreamInfoImpl::downstreamAddressProvider().sslConnection(); + } + Ssl::ConnectionInfoConstSharedPtr upstreamSslConnection() const override { + return StreamInfoImpl::upstreamSslConnection(); + } void dumpState(std::ostream& os, int indent_level) const override { StreamInfoImpl::dumpState(os, indent_level); const char* spaces = spacesForLevel(indent_level); - os << spaces << "OverridableRemoteSocketAddressSetterStreamInfo " << this + os << spaces << "OverridableRemoteConnectionInfoSetterStreamInfo " << this << DUMP_MEMBER_AS(remoteAddress(), remoteAddress()->asStringView()) << DUMP_MEMBER_AS(directRemoteAddress(), directRemoteAddress()->asStringView()) << DUMP_MEMBER_AS(localAddress(), localAddress()->asStringView()) << "\n"; @@ -659,7 +667,7 @@ class FilterManager : public ScopeTrackedObject, 
connection_(connection), stream_id_(stream_id), account_(std::move(account)), proxy_100_continue_(proxy_100_continue), buffer_limit_(buffer_limit), filter_chain_factory_(filter_chain_factory), local_reply_(local_reply), - stream_info_(protocol, time_source, connection.addressProviderSharedPtr(), + stream_info_(protocol, time_source, connection.connectionInfoProviderSharedPtr(), parent_filter_state, filter_state_life_span) {} ~FilterManager() override { ASSERT(state_.destroyed_); @@ -1019,7 +1027,7 @@ class FilterManager : public ScopeTrackedObject, FilterChainFactory& filter_chain_factory_; const LocalReply::LocalReply& local_reply_; - OverridableRemoteSocketAddressSetterStreamInfo stream_info_; + OverridableRemoteConnectionInfoSetterStreamInfo stream_info_; // TODO(snowp): Once FM has been moved to its own file we'll make these private classes of FM, // at which point they no longer need to be friends. friend ActiveStreamFilterBase; @@ -1052,7 +1060,8 @@ class FilterManager : public ScopeTrackedObject, State() : remote_complete_(false), local_complete_(false), has_continue_headers_(false), created_filter_chain_(false), is_head_request_(false), is_grpc_request_(false), - non_100_response_headers_encoded_(false), under_on_local_reply_(false) {} + non_100_response_headers_encoded_(false), under_on_local_reply_(false), + decoder_filter_chain_aborted_(false), encoder_filter_chain_aborted_(false) {} uint32_t filter_call_state_{0}; @@ -1072,6 +1081,9 @@ class FilterManager : public ScopeTrackedObject, bool non_100_response_headers_encoded_ : 1; // True under the stack of onLocalReply, false otherwise. bool under_on_local_reply_ : 1; + // True when the filter chain iteration was aborted with local reply. + bool decoder_filter_chain_aborted_ : 1; + bool encoder_filter_chain_aborted_ : 1; // The following 3 members are booleans rather than part of the space-saving bitfield as they // are passed as arguments to functions expecting bools. 
Extend State using the bitfield diff --git a/source/common/http/header_map_impl.cc b/source/common/http/header_map_impl.cc index ee6956d049d9b..bfd77c8dcfa3a 100644 --- a/source/common/http/header_map_impl.cc +++ b/source/common/http/header_map_impl.cc @@ -22,6 +22,9 @@ namespace { // This includes the NULL (StringUtil::itoa technically only needs 21). constexpr size_t MaxIntegerLength{32}; +constexpr absl::string_view DelimiterForInlineHeaders{","}; +constexpr absl::string_view DelimiterForInlineCookies{"; "}; + void validateCapacity(uint64_t new_capacity) { // If the resizing will cause buffer overflow due to hitting uint32_t::max, an OOM is likely // imminent. Fast-fail rather than allow a buffer overflow attack (issue #1421) @@ -46,6 +49,13 @@ bool validatedLowerCaseString(absl::string_view str) { return lower_case_str == str; } +absl::string_view delimiterByHeader(const LowerCaseString& key, bool correctly_coalesce_cookies) { + if (correctly_coalesce_cookies && key == Http::Headers::get().Cookie) { + return DelimiterForInlineCookies; + } + return DelimiterForInlineHeaders; +} + } // namespace // Initialize as a Type::Inline @@ -368,8 +378,10 @@ void HeaderMapImpl::insertByKey(HeaderString&& key, HeaderString&& value) { if (*lookup.value().entry_ == nullptr) { maybeCreateInline(lookup.value().entry_, *lookup.value().key_, std::move(value)); } else { + const auto delimiter = + delimiterByHeader(*lookup.value().key_, header_map_correctly_coalesce_cookies_); const uint64_t added_size = - appendToHeader((*lookup.value().entry_)->value(), value.getStringView()); + appendToHeader((*lookup.value().entry_)->value(), value.getStringView(), delimiter); addSize(added_size); value.clear(); } @@ -434,10 +446,8 @@ void HeaderMapImpl::appendCopy(const LowerCaseString& key, absl::string_view val // TODO(#9221): converge on and document a policy for coalescing multiple headers. 
auto entry = getExisting(key); if (!entry.empty()) { - const std::string delimiter = (key == Http::Headers::get().Cookie ? "; " : ","); - const uint64_t added_size = header_map_correctly_coalesce_cookies_ - ? appendToHeader(entry[0]->value(), value, delimiter) - : appendToHeader(entry[0]->value(), value); + const auto delimiter = delimiterByHeader(key, header_map_correctly_coalesce_cookies_); + const uint64_t added_size = appendToHeader(entry[0]->value(), value, delimiter); addSize(added_size); } else { addCopy(key, value); @@ -658,8 +668,24 @@ HeaderMapImplUtility::getAllHeaderMapImplInfo() { return ret; } -absl::optional -RequestHeaderMapImpl::getTraceContext(absl::string_view key) const { +absl::string_view RequestHeaderMapImpl::protocol() const { return getProtocolValue(); } + +absl::string_view RequestHeaderMapImpl::authority() const { return getHostValue(); } + +absl::string_view RequestHeaderMapImpl::path() const { return getPathValue(); } + +absl::string_view RequestHeaderMapImpl::method() const { return getMethodValue(); } + +void RequestHeaderMapImpl::forEach(Tracing::TraceContext::IterateCallback callback) const { + HeaderMapImpl::iterate([cb = std::move(callback)](const HeaderEntry& entry) { + if (cb(entry.key().getStringView(), entry.value().getStringView())) { + return HeaderMap::Iterate::Continue; + } + return HeaderMap::Iterate::Break; + }); +} + +absl::optional RequestHeaderMapImpl::getByKey(absl::string_view key) const { ASSERT(validatedLowerCaseString(key)); auto result = const_cast(this)->getExisting(key); @@ -669,7 +695,7 @@ RequestHeaderMapImpl::getTraceContext(absl::string_view key) const { return result[0]->value().getStringView(); } -void RequestHeaderMapImpl::setTraceContext(absl::string_view key, absl::string_view val) { +void RequestHeaderMapImpl::setByKey(absl::string_view key, absl::string_view val) { ASSERT(validatedLowerCaseString(key)); HeaderMapImpl::removeExisting(key); @@ -681,8 +707,7 @@ void 
RequestHeaderMapImpl::setTraceContext(absl::string_view key, absl::string_v HeaderMapImpl::insertByKey(std::move(new_key), std::move(new_val)); } -void RequestHeaderMapImpl::setTraceContextReferenceKey(absl::string_view key, - absl::string_view val) { +void RequestHeaderMapImpl::setByReferenceKey(absl::string_view key, absl::string_view val) { ASSERT(validatedLowerCaseString(key)); HeaderMapImpl::removeExisting(key); @@ -692,7 +717,7 @@ void RequestHeaderMapImpl::setTraceContextReferenceKey(absl::string_view key, HeaderMapImpl::insertByKey(HeaderString(key), std::move(new_val)); } -void RequestHeaderMapImpl::setTraceContextReference(absl::string_view key, absl::string_view val) { +void RequestHeaderMapImpl::setByReference(absl::string_view key, absl::string_view val) { ASSERT(validatedLowerCaseString(key)); HeaderMapImpl::removeExisting(key); diff --git a/source/common/http/header_map_impl.h b/source/common/http/header_map_impl.h index 79dd2d54c6bb5..5f64bc311c5ca 100644 --- a/source/common/http/header_map_impl.h +++ b/source/common/http/header_map_impl.h @@ -484,10 +484,15 @@ class RequestHeaderMapImpl final : public TypedHeaderMapImpl, INLINE_REQ_RESP_NUMERIC_HEADERS(DEFINE_INLINE_HEADER_NUMERIC_FUNCS) // Tracing::TraceContext - absl::optional getTraceContext(absl::string_view key) const override; - void setTraceContext(absl::string_view key, absl::string_view val) override; - void setTraceContextReferenceKey(absl::string_view key, absl::string_view val) override; - void setTraceContextReference(absl::string_view key, absl::string_view val) override; + absl::string_view protocol() const override; + absl::string_view authority() const override; + absl::string_view path() const override; + absl::string_view method() const override; + void forEach(Tracing::TraceContext::IterateCallback callback) const override; + absl::optional getByKey(absl::string_view key) const override; + void setByKey(absl::string_view key, absl::string_view val) override; + void 
setByReferenceKey(absl::string_view key, absl::string_view val) override; + void setByReference(absl::string_view key, absl::string_view val) override; protected: // NOTE: Because inline_headers_ is a variable size member, it must be the last member in the diff --git a/source/common/http/header_utility.cc b/source/common/http/header_utility.cc index b0fc8d02fe5e3..06badafc13e66 100644 --- a/source/common/http/header_utility.cc +++ b/source/common/http/header_utility.cc @@ -69,7 +69,9 @@ HeaderUtility::HeaderData::HeaderData(const envoy::config::route::v3::HeaderMatc break; case envoy::config::route::v3::HeaderMatcher::HeaderMatchSpecifierCase::kStringMatch: header_match_type_ = HeaderMatchType::StringMatch; - string_match_ = std::make_unique(config.string_match()); + string_match_ = + std::make_unique>( + config.string_match()); break; case envoy::config::route::v3::HeaderMatcher::HeaderMatchSpecifierCase:: HEADER_MATCH_SPECIFIER_NOT_SET: @@ -368,7 +370,7 @@ bool HeaderUtility::isModifiableHeader(absl::string_view header) { } HeaderUtility::HeaderValidationResult HeaderUtility::checkHeaderNameForUnderscores( - const std::string& header_name, + absl::string_view header_name, envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headers_with_underscores_action, Stats::Counter& dropped_headers_with_underscores, diff --git a/source/common/http/header_utility.h b/source/common/http/header_utility.h index 50b1d571f60e1..1053f893c34f4 100644 --- a/source/common/http/header_utility.h +++ b/source/common/http/header_utility.h @@ -250,7 +250,7 @@ class HeaderUtility { * headers_with_underscores_action. 
*/ static HeaderValidationResult checkHeaderNameForUnderscores( - const std::string& header_name, + absl::string_view header_name, envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headers_with_underscores_action, Stats::Counter& dropped_headers_with_underscores, diff --git a/source/common/http/headers.h b/source/common/http/headers.h index 50edd1fa92546..ccae31bf706c3 100644 --- a/source/common/http/headers.h +++ b/source/common/http/headers.h @@ -183,6 +183,8 @@ class HeaderValues { const LowerCaseString EnvoyUpstreamServiceTime{absl::StrCat(prefix(), "-upstream-service-time")}; const LowerCaseString EnvoyUpstreamHealthCheckedCluster{ absl::StrCat(prefix(), "-upstream-healthchecked-cluster")}; + const LowerCaseString EnvoyUpstreamStreamDurationMs{ + absl::StrCat(prefix(), "-upstream-stream-duration-ms")}; const LowerCaseString EnvoyDecoratorOperation{absl::StrCat(prefix(), "-decorator-operation")}; const LowerCaseString Expect{"expect"}; const LowerCaseString ForwardedClientCert{"x-forwarded-client-cert"}; diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index cb0af92e83bc9..de88ae236c440 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -350,6 +350,21 @@ void StreamEncoderImpl::resetStream(StreamResetReason reason) { connection_.onResetStreamBase(reason); } +void ResponseEncoderImpl::resetStream(StreamResetReason reason) { + // Clear the downstream on the account since we're resetting the downstream. + if (buffer_memory_account_) { + buffer_memory_account_->clearDownstream(); + } + + // For H1, we use idleTimeouts to cancel streams unless there was an + // explicit protocol error prior to sending a response to the downstream + // in which case we send a local reply. 
+ // TODO(kbaichoo): If we want snappier resets of H1 streams we can + // 1) Send local reply if no response data sent yet + // 2) Invoke the idle timeout sooner to close underlying connection + StreamEncoderImpl::resetStream(reason); +} + void StreamEncoderImpl::readDisable(bool disable) { if (disable) { ++read_disable_calls_; @@ -365,7 +380,7 @@ void StreamEncoderImpl::readDisable(bool disable) { uint32_t StreamEncoderImpl::bufferLimit() { return connection_.bufferLimit(); } const Network::Address::InstanceConstSharedPtr& StreamEncoderImpl::connectionLocalAddress() { - return connection_.connection().addressProvider().localAddress(); + return connection_.connection().connectionInfoProvider().localAddress(); } static const char RESPONSE_PREFIX[] = "HTTP/1.1 "; diff --git a/source/common/http/http1/codec_impl.h b/source/common/http/http1/codec_impl.h index d67412706a712..b6f32a16d5f6b 100644 --- a/source/common/http/http1/codec_impl.h +++ b/source/common/http/http1/codec_impl.h @@ -69,8 +69,12 @@ class StreamEncoderImpl : public virtual StreamEncoder, // require a flush timeout not already covered by other timeouts. } - void setAccount(Buffer::BufferMemoryAccountSharedPtr) override { - // TODO(kbaichoo): implement account tracking for H1. + void setAccount(Buffer::BufferMemoryAccountSharedPtr account) override { + // TODO(kbaichoo): implement account tracking for H1. Particularly, binding + // the account to the buffers used. The current wiring is minimal, and used + // to ensure the memory_account gets notified that the downstream request is + // closing. 
+ buffer_memory_account_ = account; } void setIsResponseToHeadRequest(bool value) { is_response_to_head_request_ = value; } @@ -88,6 +92,7 @@ class StreamEncoderImpl : public virtual StreamEncoder, static const std::string CRLF; static const std::string LAST_CHUNK; + Buffer::BufferMemoryAccountSharedPtr buffer_memory_account_; ConnectionImpl& connection_; uint32_t read_disable_calls_{}; bool disable_chunk_encoding_ : 1; @@ -134,6 +139,18 @@ class ResponseEncoderImpl : public StreamEncoderImpl, public ResponseEncoder { : StreamEncoderImpl(connection), stream_error_on_invalid_http_message_(stream_error_on_invalid_http_message) {} + ~ResponseEncoderImpl() override { + // Only the downstream stream should clear the downstream of the + // memory account. + // + // There are cases where a corresponding upstream stream dtor might + // be called, but the downstream stream isn't going to terminate soon + // such as StreamDecoderFilterCallbacks::recreateStream(). + if (buffer_memory_account_) { + buffer_memory_account_->clearDownstream(); + } + } + bool startedResponse() { return started_response_; } // Http::ResponseEncoder @@ -145,6 +162,9 @@ class ResponseEncoderImpl : public StreamEncoderImpl, public ResponseEncoder { return stream_error_on_invalid_http_message_; } + // Http1::StreamEncoderImpl + void resetStream(StreamResetReason reason) override; + private: bool started_response_{}; const bool stream_error_on_invalid_http_message_; diff --git a/source/common/http/http2/codec_impl.cc b/source/common/http/http2/codec_impl.cc index 0ae50eb547d27..2ccc1d32f9f46 100644 --- a/source/common/http/http2/codec_impl.cc +++ b/source/common/http/http2/codec_impl.cc @@ -130,7 +130,7 @@ template static T* removeConst(const void* object) { } ConnectionImpl::StreamImpl::StreamImpl(ConnectionImpl& parent, uint32_t buffer_limit) - : parent_(parent), + : MultiplexedStreamImplBase(parent.connection_.dispatcher()), parent_(parent), 
pending_recv_data_(parent_.connection_.dispatcher().getWatermarkFactory().createBuffer( [this]() -> void { this->pendingRecvBufferLowWatermark(); }, [this]() -> void { this->pendingRecvBufferHighWatermark(); }, @@ -149,14 +149,28 @@ ConnectionImpl::StreamImpl::StreamImpl(ConnectionImpl& parent, uint32_t buffer_l } } -ConnectionImpl::StreamImpl::~StreamImpl() { ASSERT(stream_idle_timer_ == nullptr); } - void ConnectionImpl::StreamImpl::destroy() { - disarmStreamIdleTimer(); + MultiplexedStreamImplBase::destroy(); parent_.stats_.streams_active_.dec(); parent_.stats_.pending_send_bytes_.sub(pending_send_data_->length()); } +void ConnectionImpl::ServerStreamImpl::destroy() { + // Only the downstream stream should clear the downstream of the + // memory account. + // This occurs in destroy as we want to ensure the Stream does not get + // reset called on it from the account. + // + // There are cases where a corresponding upstream stream dtor might + // be called, but the downstream stream isn't going to terminate soon + // such as StreamDecoderFilterCallbacks::recreateStream(). + if (buffer_memory_account_) { + buffer_memory_account_->clearDownstream(); + } + + StreamImpl::destroy(); +} + static void insertHeader(std::vector& headers, const HeaderEntry& header) { uint8_t flags = 0; if (header.key().isReference()) { @@ -186,8 +200,7 @@ void ConnectionImpl::ServerStreamImpl::encode100ContinueHeaders(const ResponseHe encodeHeaders(headers, false); } -void ConnectionImpl::StreamImpl::encodeHeadersBase(const std::vector& final_headers, - bool end_stream) { +void ConnectionImpl::StreamImpl::encodeHeadersBase(const HeaderMap& headers, bool end_stream) { nghttp2_data_provider provider; if (!end_stream) { provider.source.ptr = this; @@ -199,7 +212,7 @@ void ConnectionImpl::StreamImpl::encodeHeadersBase(const std::vector } local_end_stream_ = end_stream; - submitHeaders(final_headers, end_stream ? nullptr : &provider); + submitHeaders(headers, end_stream ? 
nullptr : &provider); if (parent_.sendPendingFramesAndHandleError()) { // Intended to check through coverage that this error case is tested return; @@ -213,13 +226,12 @@ Status ConnectionImpl::ClientStreamImpl::encodeHeaders(const RequestHeaderMap& h RETURN_IF_ERROR(HeaderUtility::checkRequiredRequestHeaders(headers)); // This must exist outside of the scope of isUpgrade as the underlying memory is // needed until encodeHeadersBase has been called. - std::vector final_headers; Http::RequestHeaderMapPtr modified_headers; if (Http::Utility::isUpgrade(headers)) { modified_headers = createHeaderMap(headers); upgrade_type_ = std::string(headers.getUpgradeValue()); Http::Utility::transformUpgradeRequestFromH1toH2(*modified_headers); - buildHeaders(final_headers, *modified_headers); + encodeHeadersBase(*modified_headers, end_stream); } else if (headers.Method() && headers.Method()->value() == "CONNECT") { // If this is not an upgrade style connect (above branch) it is a bytestream // connect and should have :path and :protocol set accordingly @@ -232,11 +244,10 @@ Status ConnectionImpl::ClientStreamImpl::encodeHeaders(const RequestHeaderMap& h if (!headers.Path()) { modified_headers->setPath("/"); } - buildHeaders(final_headers, *modified_headers); + encodeHeadersBase(*modified_headers, end_stream); } else { - buildHeaders(final_headers, headers); + encodeHeadersBase(headers, end_stream); } - encodeHeadersBase(final_headers, end_stream); return okStatus(); } @@ -247,16 +258,14 @@ void ConnectionImpl::ServerStreamImpl::encodeHeaders(const ResponseHeaderMap& he // This must exist outside of the scope of isUpgrade as the underlying memory is // needed until encodeHeadersBase has been called. 
- std::vector final_headers; Http::ResponseHeaderMapPtr modified_headers; if (Http::Utility::isUpgrade(headers)) { modified_headers = createHeaderMap(headers); Http::Utility::transformUpgradeResponseFromH1toH2(*modified_headers); - buildHeaders(final_headers, *modified_headers); + encodeHeadersBase(*modified_headers, end_stream); } else { - buildHeaders(final_headers, headers); + encodeHeadersBase(headers, end_stream); } - encodeHeadersBase(final_headers, end_stream); } void ConnectionImpl::StreamImpl::encodeTrailersBase(const HeaderMap& trailers) { @@ -271,7 +280,7 @@ void ConnectionImpl::StreamImpl::encodeTrailersBase(const HeaderMap& trailers) { trailers.empty() && parent_.skip_encoding_empty_trailers_; if (!skip_encoding_empty_trailers) { pending_trailers_to_encode_ = cloneTrailers(trailers); - createPendingFlushTimer(); + onLocalEndStream(); } } else { submitTrailers(trailers); @@ -459,34 +468,29 @@ void ConnectionImpl::StreamImpl::onDataSourceSend(const uint8_t* framehd, size_t parent_.connection_.write(output, false); } -void ConnectionImpl::ClientStreamImpl::submitHeaders(const std::vector& final_headers, +void ConnectionImpl::ClientStreamImpl::submitHeaders(const HeaderMap& headers, nghttp2_data_provider* provider) { ASSERT(stream_id_ == -1); + std::vector final_headers; + buildHeaders(final_headers, headers); stream_id_ = nghttp2_submit_request(parent_.session_, nullptr, final_headers.data(), final_headers.size(), provider, base()); ASSERT(stream_id_ > 0); } -void ConnectionImpl::ServerStreamImpl::submitHeaders(const std::vector& final_headers, +void ConnectionImpl::ServerStreamImpl::submitHeaders(const HeaderMap& headers, nghttp2_data_provider* provider) { ASSERT(stream_id_ != -1); + std::vector final_headers; + buildHeaders(final_headers, headers); int rc = nghttp2_submit_response(parent_.session_, stream_id_, final_headers.data(), final_headers.size(), provider); ASSERT(rc == 0); } -void ConnectionImpl::ServerStreamImpl::createPendingFlushTimer() { - 
ASSERT(stream_idle_timer_ == nullptr); - if (stream_idle_timeout_.count() > 0) { - stream_idle_timer_ = - parent_.connection_.dispatcher().createTimer([this] { onPendingFlushTimer(); }); - stream_idle_timer_->enableTimer(stream_idle_timeout_); - } -} - void ConnectionImpl::StreamImpl::onPendingFlushTimer() { ENVOY_CONN_LOG(debug, "pending stream flush timeout", parent_.connection_); - stream_idle_timer_.reset(); + MultiplexedStreamImplBase::onPendingFlushTimer(); parent_.stats_.tx_flush_timeout_.inc(); ASSERT(local_end_stream_ && !local_end_stream_sent_); // This will emit a reset frame for this stream and close the stream locally. No reset callbacks @@ -525,11 +529,20 @@ void ConnectionImpl::StreamImpl::encodeDataHelper(Buffer::Instance& data, bool e // Intended to check through coverage that this error case is tested return; } - if (local_end_stream_ && pending_send_data_->length() > 0) { - createPendingFlushTimer(); + if (local_end_stream_) { + onLocalEndStream(); } } +void ConnectionImpl::ServerStreamImpl::resetStream(StreamResetReason reason) { + // Clear the downstream on the account since we're resetting the downstream. + if (buffer_memory_account_) { + buffer_memory_account_->clearDownstream(); + } + + StreamImpl::resetStream(reason); +} + void ConnectionImpl::StreamImpl::resetStream(StreamResetReason reason) { // Higher layers expect calling resetStream() to immediately raise reset callbacks. runResetCallbacks(reason); @@ -537,8 +550,9 @@ void ConnectionImpl::StreamImpl::resetStream(StreamResetReason reason) { // If we submit a reset, nghttp2 will cancel outbound frames that have not yet been sent. // We want these frames to go out so we defer the reset until we send all of the frames that // end the local stream. 
- if (local_end_stream_ && !local_end_stream_sent_) { - parent_.pending_deferred_reset_ = true; + if (useDeferredReset() && local_end_stream_ && !local_end_stream_sent_) { + ASSERT(parent_.getStream(stream_id_) != nullptr); + parent_.pending_deferred_reset_streams_.emplace(stream_id_, this); deferred_reset_ = reason; ENVOY_CONN_LOG(trace, "deferred reset stream", parent_.connection_); } else { @@ -605,8 +619,9 @@ ConnectionImpl::ConnectionImpl(Network::Connection& connection, CodecStats& stat protocol_constraints_(stats, http2_options), skip_encoding_empty_trailers_(Runtime::runtimeFeatureEnabled( "envoy.reloadable_features.http2_skip_encoding_empty_trailers")), - dispatching_(false), raised_goaway_(false), pending_deferred_reset_(false), - random_(random_generator), + skip_dispatching_frames_for_closed_connection_(Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.skip_dispatching_frames_for_closed_connection")), + dispatching_(false), raised_goaway_(false), random_(random_generator), last_received_data_time_(connection_.dispatcher().timeSource().monotonicTime()) { if (http2_options.has_connection_keepalive()) { keepalive_interval_ = std::chrono::milliseconds( @@ -674,11 +689,22 @@ void ConnectionImpl::onKeepaliveResponse() { } void ConnectionImpl::onKeepaliveResponseTimeout() { - ENVOY_CONN_LOG(debug, "Closing connection due to keepalive timeout", connection_); + ENVOY_CONN_LOG_EVENT(debug, "h2_ping_timeout", "Closing connection due to keepalive timeout", + connection_); stats_.keepalive_timeout_.inc(); connection_.close(Network::ConnectionCloseType::NoFlush); } +bool ConnectionImpl::slowContainsStreamId(int32_t stream_id) const { + for (const auto& stream : active_streams_) { + if (stream->stream_id_ == stream_id) { + return true; + } + } + + return false; +} + Http::Status ConnectionImpl::dispatch(Buffer::Instance& data) { ScopeTrackerScopeState scope(this, connection_.dispatcher()); ENVOY_CONN_LOG(trace, "dispatching {} bytes", connection_, 
data.length()); @@ -724,14 +750,20 @@ Http::Status ConnectionImpl::dispatch(Buffer::Instance& data) { } const ConnectionImpl::StreamImpl* ConnectionImpl::getStream(int32_t stream_id) const { - return static_cast(nghttp2_session_get_stream_user_data(session_, stream_id)); + // Delegate to the non-const version. + return const_cast(this)->getStream(stream_id); } ConnectionImpl::StreamImpl* ConnectionImpl::getStream(int32_t stream_id) { - return static_cast(nghttp2_session_get_stream_user_data(session_, stream_id)); + StreamImpl* stream = + static_cast(nghttp2_session_get_stream_user_data(session_, stream_id)); + SLOW_ASSERT(stream != nullptr || !slowContainsStreamId(stream_id)); + return stream; } int ConnectionImpl::onData(int32_t stream_id, const uint8_t* data, size_t len) { + ASSERT(!skip_dispatching_frames_for_closed_connection_ || + connection_.state() == Network::Connection::State::Open); StreamImpl* stream = getStream(stream_id); // If this results in buffering too much data, the watermark buffer will call // pendingRecvBufferHighWatermark, resulting in ++read_disable_count_ @@ -780,6 +812,9 @@ Status ConnectionImpl::protocolErrorForTest() { Status ConnectionImpl::onBeforeFrameReceived(const nghttp2_frame_hd* hd) { ENVOY_CONN_LOG(trace, "about to recv frame type={}, flags={}, stream_id={}", connection_, static_cast(hd->type), static_cast(hd->flags), hd->stream_id); + ASSERT(!skip_dispatching_frames_for_closed_connection_ || + connection_.state() == Network::Connection::State::Open); + current_stream_id_ = hd->stream_id; // Track all the frames without padding here, since this is the only callback we receive @@ -805,6 +840,8 @@ enum GoAwayErrorCode ngHttp2ErrorCodeToErrorCode(uint32_t code) noexcept { Status ConnectionImpl::onFrameReceived(const nghttp2_frame* frame) { ENVOY_CONN_LOG(trace, "recv frame type={}", connection_, static_cast(frame->hd.type)); + ASSERT(!skip_dispatching_frames_for_closed_connection_ || + connection_.state() == 
Network::Connection::State::Open); // onFrameReceived() is called with a complete HEADERS frame assembled from all the HEADERS // and CONTINUATION frames, but we track them separately: HEADERS frames in onBeginHeaders() @@ -1077,6 +1114,8 @@ int ConnectionImpl::onStreamClose(int32_t stream_id, uint32_t error_code) { stream->destroy(); current_stream_id_.reset(); + // TODO(antoniovicente) Test coverage for onCloseStream before deferred reset handling happens. + pending_deferred_reset_streams_.erase(stream->stream_id_); connection_.dispatcher().deferredDelete(stream->removeFromList(active_streams_)); // Any unconsumed data must be consumed before the stream is deleted. // nghttp2 does not appear to track this internally, and any stream deleted @@ -1185,12 +1224,15 @@ Status ConnectionImpl::sendPendingFrames() { // to short circuit requests. In the best effort case, we complete the stream before // resetting. In other cases, we just do the reset now which will blow away pending data // frames and release any memory associated with the stream. - if (pending_deferred_reset_) { - pending_deferred_reset_ = false; - for (auto& stream : active_streams_) { - if (stream->deferred_reset_) { - stream->resetStreamWorker(stream->deferred_reset_.value()); - } + if (!pending_deferred_reset_streams_.empty()) { + while (!pending_deferred_reset_streams_.empty()) { + auto it = pending_deferred_reset_streams_.begin(); + auto* stream = it->second; + // Sanity check: the stream's id matches the map key. + ASSERT(it->first == stream->stream_id_); + pending_deferred_reset_streams_.erase(it); + ASSERT(stream->deferred_reset_); + stream->resetStreamWorker(stream->deferred_reset_.value()); } RETURN_IF_ERROR(sendPendingFrames()); } @@ -1280,6 +1322,11 @@ int ConnectionImpl::setAndCheckNghttp2CallbackStatus(Status&& status) { // Keep the error status that caused the original failure. Subsequent // error statuses are silently discarded. 
nghttp2_callback_status_.Update(std::move(status)); + if (skip_dispatching_frames_for_closed_connection_ && nghttp2_callback_status_.ok() && + connection_.state() != Network::Connection::State::Open) { + nghttp2_callback_status_ = codecProtocolError("Connection was closed while dispatching frames"); + } + return nghttp2_callback_status_.ok() ? 0 : NGHTTP2_ERR_CALLBACK_FAILURE; } @@ -1471,7 +1518,7 @@ void ConnectionImpl::dumpState(std::ostream& os, int indent_level) const { << DUMP_MEMBER(allow_metadata_) << DUMP_MEMBER(stream_error_on_invalid_http_messaging_) << DUMP_MEMBER(is_outbound_flood_monitored_control_frame_) << DUMP_MEMBER(skip_encoding_empty_trailers_) << DUMP_MEMBER(dispatching_) - << DUMP_MEMBER(raised_goaway_) << DUMP_MEMBER(pending_deferred_reset_) << '\n'; + << DUMP_MEMBER(raised_goaway_) << DUMP_MEMBER(pending_deferred_reset_streams_.size()) << '\n'; // Dump the protocol constraints DUMP_DETAILS(&protocol_constraints_); @@ -1641,6 +1688,8 @@ int ClientConnectionImpl::onHeader(const nghttp2_frame* frame, HeaderString&& na // The client code explicitly does not currently support push promise. ASSERT(frame->hd.type == NGHTTP2_HEADERS); ASSERT(frame->headers.cat == NGHTTP2_HCAT_RESPONSE || frame->headers.cat == NGHTTP2_HCAT_HEADERS); + ASSERT(!skip_dispatching_frames_for_closed_connection_ || + connection_.state() == Network::Connection::State::Open); return saveHeader(frame, std::move(name), std::move(value)); } @@ -1710,6 +1759,8 @@ ServerConnectionImpl::ServerConnectionImpl( Status ServerConnectionImpl::onBeginHeaders(const nghttp2_frame* frame) { // For a server connection, we should never get push promise frames. 
ASSERT(frame->hd.type == NGHTTP2_HEADERS); + ASSERT(!skip_dispatching_frames_for_closed_connection_ || + connection_.state() == Network::Connection::State::Open); RETURN_IF_ERROR(trackInboundFrames(&frame->hd, frame->headers.padlen)); if (frame->headers.cat != NGHTTP2_HCAT_REQUEST) { diff --git a/source/common/http/http2/codec_impl.h b/source/common/http/http2/codec_impl.h index 6b86ee41b9aa2..9b3363e740b9e 100644 --- a/source/common/http/http2/codec_impl.h +++ b/source/common/http/http2/codec_impl.h @@ -39,6 +39,8 @@ namespace Envoy { namespace Http { namespace Http2 { +class Http2CodecImplTestFixture; + // This is not the full client magic, but it's the smallest size that should be able to // differentiate between HTTP/1 and HTTP/2. const std::string CLIENT_MAGIC_PREFIX = "PRI * HTTP/2"; @@ -181,25 +183,16 @@ class ConnectionImpl : public virtual Connection, * Base class for client and server side streams. */ struct StreamImpl : public virtual StreamEncoder, - public Stream, public LinkedObject, public Event::DeferredDeletable, - public StreamCallbackHelper, + public Http::MultiplexedStreamImplBase, public ScopeTrackedObject { StreamImpl(ConnectionImpl& parent, uint32_t buffer_limit); - ~StreamImpl() override; - // TODO(mattklein123): Optimally this would be done in the destructor but there are currently - // deferred delete lifetime issues that need sorting out if the destructor of the stream is - // going to be able to refer to the parent connection. - void destroy(); - void disarmStreamIdleTimer() { - if (stream_idle_timer_ != nullptr) { - // To ease testing and the destructor assertion. 
- stream_idle_timer_->disableTimer(); - stream_idle_timer_.reset(); - } - } + + // Http::MultiplexedStreamImplBase + void destroy() override; + void onPendingFlushTimer() override; StreamImpl* base() { return this; } ssize_t onDataSourceRead(uint64_t length, uint32_t* data_flags); @@ -207,18 +200,19 @@ class ConnectionImpl : public virtual Connection, void resetStreamWorker(StreamResetReason reason); static void buildHeaders(std::vector& final_headers, const HeaderMap& headers); void saveHeader(HeaderString&& name, HeaderString&& value); - void encodeHeadersBase(const std::vector& final_headers, bool end_stream); - virtual void submitHeaders(const std::vector& final_headers, - nghttp2_data_provider* provider) PURE; + void encodeHeadersBase(const HeaderMap& headers, bool end_stream); + virtual void submitHeaders(const HeaderMap& headers, nghttp2_data_provider* provider) PURE; void encodeTrailersBase(const HeaderMap& headers); void submitTrailers(const HeaderMap& trailers); void submitMetadata(uint8_t flags); + // Returns true if the stream should defer the local reset stream until after the next call to + // sendPendingFrames so pending outbound frames have one final chance to be flushed. If we + // submit a reset, nghttp2 will cancel outbound frames that have not yet been sent. 
+ virtual bool useDeferredReset() const PURE; virtual StreamDecoder& decoder() PURE; virtual HeaderMap& headers() PURE; virtual void allocTrailers() PURE; virtual HeaderMapPtr cloneTrailers(const HeaderMap& trailers) PURE; - virtual void createPendingFlushTimer() PURE; - void onPendingFlushTimer(); // Http::StreamEncoder void encodeData(Buffer::Instance& data, bool end_stream) override; @@ -233,12 +227,9 @@ class ConnectionImpl : public virtual Connection, void readDisable(bool disable) override; uint32_t bufferLimit() override { return pending_recv_data_->highWatermark(); } const Network::Address::InstanceConstSharedPtr& connectionLocalAddress() override { - return parent_.connection_.addressProvider().localAddress(); + return parent_.connection_.connectionInfoProvider().localAddress(); } absl::string_view responseDetails() override { return details_; } - void setFlushTimeout(std::chrono::milliseconds timeout) override { - stream_idle_timeout_ = timeout; - } void setAccount(Buffer::BufferMemoryAccountSharedPtr account) override; // ScopeTrackedObject @@ -317,9 +308,12 @@ class ConnectionImpl : public virtual Connection, bool pending_send_buffer_high_watermark_called_ : 1; bool reset_due_to_messaging_error_ : 1; absl::string_view details_; - // See HttpConnectionManager.stream_idle_timeout. 
- std::chrono::milliseconds stream_idle_timeout_{}; - Event::TimerPtr stream_idle_timer_; + + protected: + // Http::MultiplexedStreamImplBase + bool hasPendingData() override { + return pending_send_data_->length() > 0 || pending_trailers_to_encode_ != nullptr; + } }; using StreamImplPtr = std::unique_ptr; @@ -333,9 +327,14 @@ class ConnectionImpl : public virtual Connection, : StreamImpl(parent, buffer_limit), response_decoder_(response_decoder), headers_or_trailers_(ResponseHeaderMapImpl::create()) {} + // Http::MultiplexedStreamImplBase + // Client streams do not need a flush timer because we currently assume that any failure + // to flush would be covered by a request/stream/etc. timeout. + void setFlushTimeout(std::chrono::milliseconds /*timeout*/) override {} // StreamImpl - void submitHeaders(const std::vector& final_headers, - nghttp2_data_provider* provider) override; + void submitHeaders(const HeaderMap& headers, nghttp2_data_provider* provider) override; + // Do not use deferred reset on upstream connections. + bool useDeferredReset() const override { return false; } StreamDecoder& decoder() override { return response_decoder_; } void decodeHeaders() override; void decodeTrailers() override; @@ -358,10 +357,6 @@ class ConnectionImpl : public virtual Connection, HeaderMapPtr cloneTrailers(const HeaderMap& trailers) override { return createHeaderMap(trailers); } - void createPendingFlushTimer() override { - // Client streams do not create a flush timer because we currently assume that any failure - // to flush would be covered by a request/stream/etc. timeout. 
- } // RequestEncoder Status encodeHeaders(const RequestHeaderMap& headers, bool end_stream) override; @@ -388,8 +383,12 @@ class ConnectionImpl : public virtual Connection, : StreamImpl(parent, buffer_limit), headers_or_trailers_(RequestHeaderMapImpl::create()) {} // StreamImpl - void submitHeaders(const std::vector& final_headers, - nghttp2_data_provider* provider) override; + void destroy() override; + void submitHeaders(const HeaderMap& headers, nghttp2_data_provider* provider) override; + // Enable deferred reset on downstream connections so outbound HTTP internal error replies are + // written out before force resetting the stream, assuming there is enough H2 connection flow + // control window is available. + bool useDeferredReset() const override { return true; } StreamDecoder& decoder() override { return *request_decoder_; } void decodeHeaders() override; void decodeTrailers() override; @@ -406,7 +405,7 @@ class ConnectionImpl : public virtual Connection, HeaderMapPtr cloneTrailers(const HeaderMap& trailers) override { return createHeaderMap(trailers); } - void createPendingFlushTimer() override; + void resetStream(StreamResetReason reason) override; // ResponseEncoder void encode100ContinueHeaders(const ResponseHeaderMap& headers) override; @@ -529,6 +528,7 @@ class ConnectionImpl : public virtual Connection, // controlled by "envoy.reloadable_features.http2_skip_encoding_empty_trailers" runtime feature // flag. const bool skip_encoding_empty_trailers_; + const bool skip_dispatching_frames_for_closed_connection_; // dumpState helper method. 
virtual void dumpStreams(std::ostream& os, int indent_level) const; @@ -539,6 +539,8 @@ class ConnectionImpl : public virtual Connection, const MonotonicTime& lastReceivedDataTime() { return last_received_data_time_; } private: + friend class Http2CodecImplTestFixture; + virtual ConnectionCallbacks& callbacks() PURE; virtual Status onBeginHeaders(const nghttp2_frame* frame) PURE; int onData(int32_t stream_id, const uint8_t* data, size_t len); @@ -561,13 +563,18 @@ class ConnectionImpl : public virtual Connection, virtual Status trackInboundFrames(const nghttp2_frame_hd* hd, uint32_t padding_length) PURE; void onKeepaliveResponse(); void onKeepaliveResponseTimeout(); + bool slowContainsStreamId(int32_t stream_id) const; virtual StreamResetReason getMessagingErrorResetReason() const PURE; // Tracks the current slice we're processing in the dispatch loop. const Buffer::RawSlice* current_slice_ = nullptr; + // Streams that are pending deferred reset. Using an ordered map provides determinism in the rare + // case where there are multiple streams waiting for deferred reset. The stream id is also used to + // remove streams from the map when they are closed in order to avoid calls to resetStreamWorker + // after the stream has been removed from the active list. 
+ std::map pending_deferred_reset_streams_; bool dispatching_ : 1; bool raised_goaway_ : 1; - bool pending_deferred_reset_ : 1; Event::SchedulableCallbackPtr protocol_constraint_violation_callback_; Random::RandomGenerator& random_; MonotonicTime last_received_data_time_{}; diff --git a/source/common/http/http2/metadata_decoder.cc b/source/common/http/http2/metadata_decoder.cc index 3efd9346e5910..f6c580df956c4 100644 --- a/source/common/http/http2/metadata_decoder.cc +++ b/source/common/http/http2/metadata_decoder.cc @@ -22,7 +22,7 @@ bool MetadataDecoder::receiveMetadata(const uint8_t* data, size_t len) { ASSERT(data != nullptr && len != 0); payload_.add(data, len); - total_payload_size_ += payload_.length(); + total_payload_size_ += len; return total_payload_size_ <= max_payload_size_bound_; } diff --git a/source/common/http/http2/metadata_decoder.h b/source/common/http/http2/metadata_decoder.h index 4a3c9ad301ec6..e5f91925d5083 100644 --- a/source/common/http/http2/metadata_decoder.h +++ b/source/common/http/http2/metadata_decoder.h @@ -44,6 +44,11 @@ class MetadataDecoder : Logger::Loggable { */ bool onMetadataFrameComplete(bool end_metadata); + /** + * Returns the total size of METADATA frame payloads received. 
+ */ + uint64_t totalPayloadSize() const { return total_payload_size_; } + private: friend class MetadataEncoderDecoderTest_VerifyEncoderDecoderOnMultipleMetadataMaps_Test; friend class MetadataEncoderDecoderTest_VerifyEncoderDecoderMultipleMetadataReachSizeLimit_Test; diff --git a/source/common/http/http3/codec_stats.h b/source/common/http/http3/codec_stats.h index 47b69965896e3..9e7ca5859b2ff 100644 --- a/source/common/http/http3/codec_stats.h +++ b/source/common/http/http3/codec_stats.h @@ -18,12 +18,8 @@ namespace Http3 { COUNTER(rx_reset) \ COUNTER(tx_reset) \ COUNTER(metadata_not_supported_error) \ - COUNTER(quic_version_43) \ - COUNTER(quic_version_46) \ - COUNTER(quic_version_50) \ - COUNTER(quic_version_51) \ - COUNTER(quic_version_h3_29) \ - COUNTER(quic_version_rfc_v1) + COUNTER(quic_version_rfc_v1) \ + COUNTER(tx_flush_timeout) /** * Wrapper struct for the HTTP/3 codec stats. @see stats_macros.h diff --git a/source/common/http/match_wrapper/config.cc b/source/common/http/match_wrapper/config.cc index 3ed2eece72a21..30c70adb2fac5 100644 --- a/source/common/http/match_wrapper/config.cc +++ b/source/common/http/match_wrapper/config.cc @@ -105,10 +105,17 @@ Envoy::Http::FilterFactoryCb MatchWrapperConfig::createFilterFactoryFromProtoTyp MatchTreeValidationVisitor validation_visitor(*factory.matchingRequirements()); Envoy::Http::Matching::HttpFilterActionContext action_context{prefix, context}; - auto match_tree = Matcher::MatchTreeFactory( - action_context, context.getServerFactoryContext(), validation_visitor) - .create(proto_config.matcher()); + Matcher::MatchTreeFactory + matcher_factory(action_context, context.getServerFactoryContext(), validation_visitor); + Matcher::MatchTreeFactoryCb factory_cb; + if (proto_config.has_xds_matcher()) { + factory_cb = matcher_factory.create(proto_config.xds_matcher()); + } else if (proto_config.has_matcher()) { + factory_cb = matcher_factory.create(proto_config.matcher()); + } else { + throw EnvoyException("one of 
`matcher` and `matcher_tree` must be set."); + } if (!validation_visitor.errors().empty()) { // TODO(snowp): Output all violations. @@ -116,8 +123,8 @@ Envoy::Http::FilterFactoryCb MatchWrapperConfig::createFilterFactoryFromProtoTyp validation_visitor.errors()[0])); } - return [filter_factory, match_tree](Envoy::Http::FilterChainFactoryCallbacks& callbacks) -> void { - DelegatingFactoryCallbacks delegated_callbacks(callbacks, match_tree()); + return [filter_factory, factory_cb](Envoy::Http::FilterChainFactoryCallbacks& callbacks) -> void { + DelegatingFactoryCallbacks delegated_callbacks(callbacks, factory_cb()); return filter_factory(delegated_callbacks); }; diff --git a/source/common/http/utility.cc b/source/common/http/utility.cc index cc472f8e1cecd..9a803b9d8e906 100644 --- a/source/common/http/utility.cc +++ b/source/common/http/utility.cc @@ -254,44 +254,79 @@ bool maybeAdjustForIpv6(absl::string_view absolute_url, uint64_t& offset, uint64 return true; } -std::string parseCookie(const HeaderMap& headers, const std::string& key, - const std::string& cookie) { - - std::string ret; - - headers.iterateReverse([&key, &ret, &cookie](const HeaderEntry& header) -> HeaderMap::Iterate { - // Find the cookie headers in the request (typically, there's only one). - if (header.key() == cookie) { - - // Split the cookie header into individual cookies. - for (const auto& s : StringUtil::splitToken(header.value().getStringView(), ";")) { - // Find the key part of the cookie (i.e. the name of the cookie). - size_t first_non_space = s.find_first_not_of(' '); - size_t equals_index = s.find('='); - if (equals_index == absl::string_view::npos) { - // The cookie is malformed if it does not have an `=`. Continue - // checking other cookies in this header. - continue; - } - const absl::string_view k = s.substr(first_non_space, equals_index - first_non_space); - // If the key matches, parse the value from the rest of the cookie string. 
- if (k == key) { - absl::string_view v = s.substr(equals_index + 1, s.size() - 1); - - // Cookie values may be wrapped in double quotes. - // https://tools.ietf.org/html/rfc6265#section-4.1.1 - if (v.size() >= 2 && v.back() == '"' && v[0] == '"') { - v = v.substr(1, v.size() - 2); - } - ret = std::string{v}; - return HeaderMap::Iterate::Break; - } +void forEachCookie( + const HeaderMap& headers, const LowerCaseString& cookie_header, + const std::function& cookie_consumer) { + const Http::HeaderMap::GetResult cookie_headers = headers.get(cookie_header); + + for (size_t index = 0; index < cookie_headers.size(); index++) { + auto cookie_header_value = cookie_headers[index]->value().getStringView(); + + // Split the cookie header into individual cookies. + for (const auto& s : StringUtil::splitToken(cookie_header_value, ";")) { + // Find the key part of the cookie (i.e. the name of the cookie). + size_t first_non_space = s.find_first_not_of(' '); + size_t equals_index = s.find('='); + if (equals_index == absl::string_view::npos) { + // The cookie is malformed if it does not have an `=`. Continue + // checking other cookies in this header. + continue; + } + absl::string_view k = s.substr(first_non_space, equals_index - first_non_space); + absl::string_view v = s.substr(equals_index + 1, s.size() - 1); + + // Cookie values may be wrapped in double quotes. + // https://tools.ietf.org/html/rfc6265#section-4.1.1 + if (v.size() >= 2 && v.back() == '"' && v[0] == '"') { + v = v.substr(1, v.size() - 2); + } + + if (!cookie_consumer(k, v)) { + return; } } - return HeaderMap::Iterate::Continue; + } +} + +std::string parseCookie(const HeaderMap& headers, const std::string& key, + const LowerCaseString& cookie) { + std::string value; + + // Iterate over each cookie & return if its value is not empty. 
+ forEachCookie(headers, cookie, [&key, &value](absl::string_view k, absl::string_view v) -> bool { + if (key == k) { + value = std::string{v}; + return false; + } + + // continue iterating until a cookie that matches `key` is found. + return true; }); - return ret; + return value; +} + +absl::flat_hash_map +Utility::parseCookies(const RequestHeaderMap& headers) { + return Utility::parseCookies(headers, [](absl::string_view) -> bool { return true; }); +} + +absl::flat_hash_map +Utility::parseCookies(const RequestHeaderMap& headers, + const std::function& key_filter) { + absl::flat_hash_map cookies; + + forEachCookie(headers, Http::Headers::get().Cookie, + [&cookies, &key_filter](absl::string_view k, absl::string_view v) -> bool { + if (key_filter(k)) { + cookies.emplace(k, v); + } + + // continue iterating until all cookies are processed. + return true; + }); + + return cookies; } bool Utility::Url::initialize(absl::string_view absolute_url, bool is_connect) { @@ -429,11 +464,12 @@ std::string Utility::stripQueryString(const HeaderString& path) { } std::string Utility::parseCookieValue(const HeaderMap& headers, const std::string& key) { - return parseCookie(headers, key, Http::Headers::get().Cookie.get()); + // TODO(wbpcode): Modify the headers parameter type to 'RequestHeaderMap'. + return parseCookie(headers, key, Http::Headers::get().Cookie); } std::string Utility::parseSetCookieValue(const Http::HeaderMap& headers, const std::string& key) { - return parseCookie(headers, key, Http::Headers::get().SetCookie.get()); + return parseCookie(headers, key, Http::Headers::get().SetCookie); } std::string Utility::makeSetCookieValue(const std::string& key, const std::string& value, @@ -549,7 +585,8 @@ void Utility::sendLocalReply(const bool& is_reset, const EncodeFunctions& encode // status. // JsonFormatter adds a '\n' at the end. For header value, it should be removed. 
// https://github.com/envoyproxy/envoy/blob/main/source/common/formatter/substitution_formatter.cc#L129 - if (body_text[body_text.length() - 1] == '\n') { + if (content_type == Headers::get().ContentTypeValues.Json && + body_text[body_text.length() - 1] == '\n') { body_text = body_text.substr(0, body_text.length() - 1); } response_headers->setGrpcMessage(PercentEncoding::encode(body_text)); @@ -921,35 +958,6 @@ void Utility::transformUpgradeResponseFromH2toH1(ResponseHeaderMap& headers, } } -void Utility::traversePerFilterConfigGeneric( - const std::string& filter_name, const Router::RouteConstSharedPtr& route, - std::function cb) { - if (!route) { - return; - } - - const Router::RouteEntry* routeEntry = route->routeEntry(); - - if (routeEntry != nullptr) { - auto maybe_vhost_config = routeEntry->virtualHost().perFilterConfig(filter_name); - if (maybe_vhost_config != nullptr) { - cb(*maybe_vhost_config); - } - } - - auto maybe_route_config = route->perFilterConfig(filter_name); - if (maybe_route_config != nullptr) { - cb(*maybe_route_config); - } - - if (routeEntry != nullptr) { - auto maybe_weighted_cluster_config = routeEntry->perFilterConfig(filter_name); - if (maybe_weighted_cluster_config != nullptr) { - cb(*maybe_weighted_cluster_config); - } - } -} - std::string Utility::PercentEncoding::encode(absl::string_view value, absl::string_view reserved_chars) { absl::flat_hash_set reserved_char_set{reserved_chars.begin(), reserved_chars.end()}; diff --git a/source/common/http/utility.h b/source/common/http/utility.h index 20d5ae98b223e..d2ba67613c35d 100644 --- a/source/common/http/utility.h +++ b/source/common/http/utility.h @@ -255,6 +255,23 @@ std::string stripQueryString(const HeaderString& path); **/ std::string parseCookieValue(const HeaderMap& headers, const std::string& key); +/** + * Parse cookies from header into a map. + * @param headers supplies the headers to get cookies from. 
+ * @param key_filter predicate that returns true for every cookie key to be included. + * @return absl::flat_hash_map cookie map. + **/ +absl::flat_hash_map +parseCookies(const RequestHeaderMap& headers, + const std::function& key_filter); + +/** + * Parse cookies from header into a map. + * @param headers supplies the headers to get cookies from. + * @return absl::flat_hash_map cookie map. + **/ +absl::flat_hash_map parseCookies(const RequestHeaderMap& headers); + /** * Parse a particular value out of a set-cookie * @param headers supplies the headers to get the set-cookie from. @@ -512,40 +529,10 @@ const ConfigType* resolveMostSpecificPerFilterConfig(const std::string& filter_n const Router::RouteConstSharedPtr& route) { static_assert(std::is_base_of::value, "ConfigType must be a subclass of Router::RouteSpecificFilterConfig"); - if (!route || !route->routeEntry()) { + if (!route) { return nullptr; } - return route->routeEntry()->mostSpecificPerFilterConfigTyped(filter_name); -} - -/** - * The non template implementation of traversePerFilterConfig. see - * traversePerFilterConfig for docs. - */ -void traversePerFilterConfigGeneric( - const std::string& filter_name, const Router::RouteConstSharedPtr& route, - std::function cb); - -/** - * Fold all the available per route filter configs, invoking the callback with each config (if - * it is present). Iteration of the configs is in order of specificity. That means that the callback - * will be called first for a config on a Virtual host, then a route, and finally a route entry - * (weighted cluster). If a config is not present, the callback will not be invoked. 
- */ -template -void traversePerFilterConfig(const std::string& filter_name, - const Router::RouteConstSharedPtr& route, - std::function cb) { - static_assert(std::is_base_of::value, - "ConfigType must be a subclass of Router::RouteSpecificFilterConfig"); - - traversePerFilterConfigGeneric( - filter_name, route, [&cb](const Router::RouteSpecificFilterConfig& cfg) { - const ConfigType* typed_cfg = dynamic_cast(&cfg); - if (typed_cfg != nullptr) { - cb(*typed_cfg); - } - }); + return dynamic_cast(route->mostSpecificPerFilterConfig(filter_name)); } /** @@ -567,14 +554,17 @@ getMergedPerFilterConfig(const std::string& filter_name, const Router::RouteCons absl::optional merged; - traversePerFilterConfig(filter_name, route, - [&reduce, &merged](const ConfigType& cfg) { - if (!merged) { - merged.emplace(cfg); - } else { - reduce(merged.value(), cfg); - } - }); + if (route) { + route->traversePerFilterConfig( + filter_name, [&reduce, &merged](const Router::RouteSpecificFilterConfig& cfg) { + const ConfigType* typed_cfg = dynamic_cast(&cfg); + if (!merged) { + merged.emplace(*typed_cfg); + } else { + reduce(merged.value(), *typed_cfg); + } + }); + } return merged; } diff --git a/source/common/init/target_impl.cc b/source/common/init/target_impl.cc index 1c8ddfdea5cec..5e41e62f409c2 100644 --- a/source/common/init/target_impl.cc +++ b/source/common/init/target_impl.cc @@ -45,9 +45,11 @@ bool TargetImpl::ready() { if (watcher_handle_) { // If we have a handle for the ManagerImpl's watcher, signal it and then reset so it can't be // accidentally signaled again. - const bool result = watcher_handle_->ready(); - watcher_handle_.reset(); - return result; + // NOTE: We must move watcher_handle_ to a local to avoid the scenario in which as a result of + // calling ready() this target is destroyed. This is possible in practice, for example when + // a listener is deleted as a result of a failure in the context of the ready() call. 
+ auto local_watcher_handle = std::move(watcher_handle_); + return local_watcher_handle->ready(); } return false; } @@ -75,12 +77,14 @@ TargetHandlePtr SharedTargetImpl::createHandle(absl::string_view handle_name) co bool SharedTargetImpl::ready() { initialized_ = true; - bool all_notified = !watcher_handles_.empty(); - for (auto& watcher_handle : watcher_handles_) { + // NOTE: We must move watcher_handles_ to a local to avoid the scenario in which as a result of + // calling ready() this target is destroyed. This is possible in practice, for example when + // a listener is deleted as a result of a failure in the context of the ready() call. + auto local_watcher_handles = std::move(watcher_handles_); + bool all_notified = !local_watcher_handles.empty(); + for (auto& watcher_handle : local_watcher_handles) { all_notified = watcher_handle->ready() && all_notified; } - // save heap and avoid repeatedly invoke - watcher_handles_.clear(); return all_notified; } diff --git a/source/common/local_info/BUILD b/source/common/local_info/BUILD index 254710601695f..6ef5460977b4e 100644 --- a/source/common/local_info/BUILD +++ b/source/common/local_info/BUILD @@ -14,7 +14,6 @@ envoy_cc_library( deps = [ "//envoy/local_info:local_info_interface", "//source/common/config:context_provider_lib", - "//source/common/config:version_converter_lib", "//source/common/stats:symbol_table_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], diff --git a/source/common/local_info/local_info_impl.h b/source/common/local_info/local_info_impl.h index 6280d782088c7..8f5b3e5fba560 100644 --- a/source/common/local_info/local_info_impl.h +++ b/source/common/local_info/local_info_impl.h @@ -6,7 +6,6 @@ #include "envoy/local_info/local_info.h" #include "source/common/config/context_provider_impl.h" -#include "source/common/config/version_converter.h" #include "source/common/stats/symbol_table_impl.h" namespace Envoy { diff --git a/source/common/matcher/exact_map_matcher.h 
b/source/common/matcher/exact_map_matcher.h index 61de9e6269137..62a4f44e17c6b 100644 --- a/source/common/matcher/exact_map_matcher.h +++ b/source/common/matcher/exact_map_matcher.h @@ -18,7 +18,7 @@ class ExactMapMatcher : public MatchTree, Logger::Loggable::MatchResult match(const DataType& data) override { const auto input = data_input_->get(data); - ENVOY_LOG(debug, "Attempting to match {}", input); + ENVOY_LOG(trace, "Attempting to match {}", input); if (input.data_availability_ == DataInputGetResult::DataAvailability::NotAvailable) { return {MatchState::UnableToMatch, absl::nullopt}; } diff --git a/source/common/matcher/field_matcher.h b/source/common/matcher/field_matcher.h index c1145d7321e9c..a7f8499179c9b 100644 --- a/source/common/matcher/field_matcher.h +++ b/source/common/matcher/field_matcher.h @@ -149,7 +149,7 @@ class SingleFieldMatcher : public FieldMatcher, Logger::Loggableget(data); - ENVOY_LOG(debug, "Attempting to match {}", input); + ENVOY_LOG(trace, "Attempting to match {}", input); if (input.data_availability_ == DataInputGetResult::DataAvailability::NotAvailable) { return {MatchState::UnableToMatch, absl::nullopt}; } @@ -157,11 +157,11 @@ class SingleFieldMatcher : public FieldMatcher, Logger::Loggablematch(input.data_); if (!current_match && input.data_availability_ == DataInputGetResult::DataAvailability::MoreDataMightBeAvailable) { - ENVOY_LOG(debug, "No match yet; delaying result as more data might be available."); + ENVOY_LOG(trace, "No match yet; delaying result as more data might be available."); return {MatchState::UnableToMatch, absl::nullopt}; } - ENVOY_LOG(debug, "Match result: {}", current_match); + ENVOY_LOG(trace, "Match result: {}", current_match); return {MatchState::MatchComplete, current_match}; } diff --git a/source/common/matcher/matcher.h b/source/common/matcher/matcher.h index 40f52b3ae699c..8afa2a0b96394 100644 --- a/source/common/matcher/matcher.h +++ b/source/common/matcher/matcher.h @@ -77,11 +77,12 @@ template 
class MatchTreeFactory { : action_factory_context_(context), server_factory_context_(server_factory_context), validation_visitor_(validation_visitor) {} - MatchTreeFactoryCb create(const envoy::config::common::matcher::v3::Matcher& config) { + // TODO(snowp): Remove this type parameter once we only have one Matcher proto. + template MatchTreeFactoryCb create(const MatcherType& config) { switch (config.matcher_type_case()) { - case envoy::config::common::matcher::v3::Matcher::kMatcherTree: + case MatcherType::kMatcherTree: return createTreeMatcher(config); - case envoy::config::common::matcher::v3::Matcher::kMatcherList: + case MatcherType::kMatcherList: return createListMatcher(config); default: NOT_REACHED_GCOVR_EXCL_LINE; @@ -90,14 +91,15 @@ template class MatchTreeFactory { } private: - MatchTreeFactoryCb - createListMatcher(const envoy::config::common::matcher::v3::Matcher& config) { + template + MatchTreeFactoryCb createListMatcher(const MatcherType& config) { std::vector, OnMatchFactoryCb>> matcher_factories; matcher_factories.reserve(config.matcher_list().matchers().size()); for (const auto& matcher : config.matcher_list().matchers()) { - matcher_factories.push_back(std::make_pair(createFieldMatcher(matcher.predicate()), - *createOnMatch(matcher.on_match()))); + matcher_factories.push_back(std::make_pair( + createFieldMatcher(matcher.predicate()), + *createOnMatch(matcher.on_match()))); } auto on_no_match = createOnMatch(config.on_no_match()); @@ -114,13 +116,12 @@ template class MatchTreeFactory { }; } - template + template FieldMatcherFactoryCb createAggregateFieldMatcherFactoryCb( - const Protobuf::RepeatedPtrField< - envoy::config::common::matcher::v3::Matcher::MatcherList::Predicate>& predicates) { + const Protobuf::RepeatedPtrField& predicates) { std::vector> sub_matchers; for (const auto& predicate : predicates) { - sub_matchers.emplace_back(createFieldMatcher(predicate)); + sub_matchers.emplace_back(createFieldMatcher(predicate)); } return 
[sub_matchers]() { @@ -134,10 +135,10 @@ template class MatchTreeFactory { }; } - FieldMatcherFactoryCb createFieldMatcher( - const envoy::config::common::matcher::v3::Matcher::MatcherList::Predicate& field_predicate) { + template + FieldMatcherFactoryCb createFieldMatcher(const FieldMatcherType& field_predicate) { switch (field_predicate.match_type_case()) { - case (envoy::config::common::matcher::v3::Matcher::MatcherList::Predicate::kSinglePredicate): { + case (PredicateType::kSinglePredicate): { auto data_input = createDataInput(field_predicate.single_predicate().input()); auto input_matcher = createInputMatcher(field_predicate.single_predicate()); @@ -145,14 +146,14 @@ template class MatchTreeFactory { return std::make_unique>(data_input(), input_matcher()); }; } - case (envoy::config::common::matcher::v3::Matcher::MatcherList::Predicate::kOrMatcher): - return createAggregateFieldMatcherFactoryCb>( + case (PredicateType::kOrMatcher): + return createAggregateFieldMatcherFactoryCb, PredicateType>( field_predicate.or_matcher().predicate()); - case (envoy::config::common::matcher::v3::Matcher::MatcherList::Predicate::kAndMatcher): - return createAggregateFieldMatcherFactoryCb>( + case (PredicateType::kAndMatcher): + return createAggregateFieldMatcherFactoryCb, PredicateType>( field_predicate.and_matcher().predicate()); - case (envoy::config::common::matcher::v3::Matcher::MatcherList::Predicate::kNotMatcher): { - auto matcher_factory = createFieldMatcher(field_predicate.not_matcher()); + case (PredicateType::kNotMatcher): { + auto matcher_factory = createFieldMatcher(field_predicate.not_matcher()); return [matcher_factory]() { return std::make_unique>(matcher_factory()); @@ -163,10 +164,10 @@ template class MatchTreeFactory { } } - MatchTreeFactoryCb - createTreeMatcher(const envoy::config::common::matcher::v3::Matcher& matcher) { + template + MatchTreeFactoryCb createTreeMatcher(const MatcherType& matcher) { switch (matcher.matcher_tree().tree_type_case()) { - case 
envoy::config::common::matcher::v3::Matcher_MatcherTree::kExactMatchMap: { + case MatcherType::MatcherTree::kExactMatchMap: { std::vector>> match_children; match_children.reserve(matcher.matcher_tree().exact_match_map().map().size()); @@ -187,16 +188,17 @@ template class MatchTreeFactory { return multimap_matcher; }; } - case envoy::config::common::matcher::v3::Matcher_MatcherTree::kPrefixMatchMap: + case MatcherType::MatcherTree::kPrefixMatchMap: NOT_IMPLEMENTED_GCOVR_EXCL_LINE; - case envoy::config::common::matcher::v3::Matcher_MatcherTree::kCustomMatch: + case MatcherType::MatcherTree::kCustomMatch: NOT_IMPLEMENTED_GCOVR_EXCL_LINE; default: NOT_REACHED_GCOVR_EXCL_LINE; } } - absl::optional> - createOnMatch(const envoy::config::common::matcher::v3::Matcher::OnMatch& on_match) { + + template + absl::optional> createOnMatch(const OnMatchType& on_match) { if (on_match.has_matcher()) { return [matcher_factory = create(on_match.matcher())]() { return OnMatch{{}, matcher_factory()}; @@ -231,8 +233,8 @@ template class MatchTreeFactory { const CommonProtocolInputPtr common_protocol_input_; }; - DataInputFactoryCb - createDataInput(const envoy::config::core::v3::TypedExtensionConfig& config) { + template + DataInputFactoryCb createDataInput(const TypedExtensionConfigType& config) { auto* factory = Config::Utility::getFactory>(config); if (factory != nullptr) { validation_visitor_.validateDataInput(*factory, config.typed_config().type_url()); @@ -257,17 +259,15 @@ template class MatchTreeFactory { [common_input]() { return std::make_unique(common_input()); }; } - InputMatcherFactoryCb createInputMatcher( - const envoy::config::common::matcher::v3::Matcher::MatcherList::Predicate::SinglePredicate& - predicate) { + template + InputMatcherFactoryCb createInputMatcher(const SinglePredicateType& predicate) { switch (predicate.matcher_case()) { - case envoy::config::common::matcher::v3::Matcher::MatcherList::Predicate::SinglePredicate:: - kValueMatch: + case 
SinglePredicateType::kValueMatch: return [value_match = predicate.value_match()]() { - return std::make_unique(value_match); + return std::make_unique>>( + value_match); }; - case envoy::config::common::matcher::v3::Matcher::MatcherList::Predicate::SinglePredicate:: - kCustomMatch: { + case SinglePredicateType::kCustomMatch: { auto& factory = Config::Utility::getAndCheckFactory(predicate.custom_match()); ProtobufTypes::MessagePtr message = Config::Utility::translateAnyToFactoryConfig( diff --git a/source/common/matcher/value_input_matcher.h b/source/common/matcher/value_input_matcher.h index e381f393550c2..0cfdb3a39ba78 100644 --- a/source/common/matcher/value_input_matcher.h +++ b/source/common/matcher/value_input_matcher.h @@ -7,10 +7,9 @@ namespace Envoy { namespace Matcher { -class StringInputMatcher : public InputMatcher { +template class StringInputMatcher : public InputMatcher { public: - explicit StringInputMatcher(const envoy::type::matcher::v3::StringMatcher& matcher) - : matcher_(matcher) {} + explicit StringInputMatcher(const StringMatcherType& matcher) : matcher_(matcher) {} bool match(absl::optional input) override { if (!input) { @@ -21,7 +20,7 @@ class StringInputMatcher : public InputMatcher { } private: - const Matchers::StringMatcherImpl matcher_; + const Matchers::StringMatcherImpl matcher_; }; } // namespace Matcher diff --git a/source/common/network/BUILD b/source/common/network/BUILD index 529e25ad6c05d..6d85e13184c8a 100644 --- a/source/common/network/BUILD +++ b/source/common/network/BUILD @@ -98,6 +98,16 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "happy_eyeballs_connection_impl_lib", + srcs = ["happy_eyeballs_connection_impl.cc"], + hdrs = ["happy_eyeballs_connection_impl.h"], + deps = [ + ":connection_base_lib", + ":connection_lib", + ], +) + envoy_cc_library( name = "apple_dns_lib", srcs = select({ diff --git a/source/common/network/apple_dns_impl.cc b/source/common/network/apple_dns_impl.cc index bd0a790c609d1..520bdada5eede 
100644 --- a/source/common/network/apple_dns_impl.cc +++ b/source/common/network/apple_dns_impl.cc @@ -52,58 +52,68 @@ AppleDnsResolverStats AppleDnsResolverImpl::generateAppleDnsResolverStats(Stats: return {ALL_APPLE_DNS_RESOLVER_STATS(POOL_COUNTER(scope))}; } +AppleDnsResolverImpl::StartResolutionResult +AppleDnsResolverImpl::startResolution(const std::string& dns_name, + DnsLookupFamily dns_lookup_family, ResolveCb callback) { + ENVOY_LOG_EVENT(debug, "apple_dns_start", "DNS resolution for {} started", dns_name); + + // When an IP address is submitted to c-ares in DnsResolverImpl, c-ares synchronously returns + // the IP without submitting a DNS query. Because Envoy has come to rely on this behavior, this + // resolver implements a similar resolution path to avoid making improper DNS queries for + // resolved IPs. + auto address = Utility::parseInternetAddressNoThrow(dns_name); + + if (address != nullptr) { + ENVOY_LOG_EVENT(debug, "apple_dns_immediate_resolution", + "DNS resolver resolved ({}) to ({}) without issuing call to Apple API", + dns_name, address->asString()); + callback(DnsResolver::ResolutionStatus::Success, + {DnsResponse(address, std::chrono::seconds(60))}); + return {nullptr, true}; + } + + ENVOY_LOG(trace, "Performing DNS resolution via Apple APIs"); + auto pending_resolution = + std::make_unique(*this, callback, dispatcher_, dns_name); + + DNSServiceErrorType error = pending_resolution->dnsServiceGetAddrInfo(dns_lookup_family); + if (error != kDNSServiceErr_NoError) { + ENVOY_LOG(warn, "DNS resolver error ({}) in dnsServiceGetAddrInfo for {}", error, dns_name); + chargeGetAddrInfoErrorStats(error); + return {nullptr, false}; + } + + if (pending_resolution->synchronously_completed_) { + return {nullptr, true}; + } + + // Hook up the query's UDS socket to the event loop to process updates. 
+ if (!pending_resolution->dnsServiceRefSockFD()) { + ENVOY_LOG(warn, "DNS resolver error in dnsServiceRefSockFD for {}", dns_name); + return {nullptr, false}; + } + + // Return the active resolution query, giving it ownership over itself so that it can + // can clean itself up once it's done. + pending_resolution->owned_ = true; + + return {std::move(pending_resolution), true}; +} + ActiveDnsQuery* AppleDnsResolverImpl::resolve(const std::string& dns_name, DnsLookupFamily dns_lookup_family, ResolveCb callback) { - ENVOY_LOG(debug, "DNS resolver resolve={}", dns_name); - - Address::InstanceConstSharedPtr address{}; - TRY_ASSERT_MAIN_THREAD { - // When an IP address is submitted to c-ares in DnsResolverImpl, c-ares synchronously returns - // the IP without submitting a DNS query. Because Envoy has come to rely on this behavior, this - // resolver implements a similar resolution path to avoid making improper DNS queries for - // resolved IPs. - address = Utility::parseInternetAddress(dns_name); - ENVOY_LOG(debug, "DNS resolver resolved ({}) to ({}) without issuing call to Apple API", - dns_name, address->asString()); - } - END_TRY - catch (const EnvoyException& e) { - // Resolution via Apple APIs - ENVOY_LOG(trace, "DNS resolver local resolution failed with: {}", e.what()); - - auto pending_resolution = - std::make_unique(*this, callback, dispatcher_, dns_name); - - DNSServiceErrorType error = pending_resolution->dnsServiceGetAddrInfo(dns_lookup_family); - if (error != kDNSServiceErr_NoError) { - ENVOY_LOG(warn, "DNS resolver error ({}) in dnsServiceGetAddrInfo for {}", error, dns_name); - chargeGetAddrInfoErrorStats(error); - return nullptr; - } - - // If the query was synchronously resolved in the Apple API call, there is no need to return the - // query. - if (pending_resolution->synchronously_completed_) { - return nullptr; - } - - // Otherwise, hook up the query's UDS socket to the event loop to process updates. 
- if (!pending_resolution->dnsServiceRefSockFD()) { - ENVOY_LOG(warn, "DNS resolver error in dnsServiceRefSockFD for {}", dns_name); - return nullptr; - } - - pending_resolution->owned_ = true; - return pending_resolution.release(); + auto pending_resolution_and_success = startResolution(dns_name, dns_lookup_family, callback); + + // If we synchronously failed the resolution, trigger a failure callback. + if (!pending_resolution_and_success.second) { + ENVOY_LOG_EVENT(debug, "apple_dns_immediate_failure", "DNS resolution for {} failed", dns_name); + + callback(DnsResolver::ResolutionStatus::Failure, {}); + return nullptr; } - ASSERT(address != nullptr); - // Finish local, synchronous resolution. This needs to happen outside of the exception block above - // as the callback itself can throw. - callback(DnsResolver::ResolutionStatus::Success, - {DnsResponse(address, std::chrono::seconds(60))}); - return nullptr; + return pending_resolution_and_success.first.release(); } void AppleDnsResolverImpl::chargeGetAddrInfoErrorStats(DNSServiceErrorType error_code) { @@ -176,6 +186,8 @@ void AppleDnsResolverImpl::PendingResolution::onEventCallback(uint32_t events) { } void AppleDnsResolverImpl::PendingResolution::finishResolve() { + ENVOY_LOG_EVENT(debug, "apple_dns_resolution_complete", + "dns resolution for {} completed with status {}", dns_name_, pending_cb_.status_); callback_(pending_cb_.status_, std::move(pending_cb_.responses_)); if (owned_) { @@ -238,7 +250,7 @@ void AppleDnsResolverImpl::PendingResolution::onDNSServiceGetAddrInfoReply( dns_name_, flags, flags & kDNSServiceFlagsMoreComing ? "yes" : "no", flags & kDNSServiceFlagsAdd ? "yes" : "no", interface_index, error_code, hostname); - // Generic error handling. + // Make sure that we trigger the failure callback if we get an error back. 
if (error_code != kDNSServiceErr_NoError) { parent_.chargeGetAddrInfoErrorStats(error_code); diff --git a/source/common/network/apple_dns_impl.h b/source/common/network/apple_dns_impl.h index d788f076586d6..17328b484b3d8 100644 --- a/source/common/network/apple_dns_impl.h +++ b/source/common/network/apple_dns_impl.h @@ -61,7 +61,7 @@ struct AppleDnsResolverStats { * Implementation of DnsResolver that uses Apple dns_sd.h APIs. All calls and callbacks are assumed * to happen on the thread that owns the creating dispatcher. */ -class AppleDnsResolverImpl : public DnsResolver, protected Logger::Loggable { +class AppleDnsResolverImpl : public DnsResolver, protected Logger::Loggable { public: AppleDnsResolverImpl(Event::Dispatcher& dispatcher, Stats::Scope& root_scope); @@ -72,6 +72,14 @@ class AppleDnsResolverImpl : public DnsResolver, protected Logger::Loggable, bool>; + StartResolutionResult startResolution(const std::string& dns_name, + DnsLookupFamily dns_lookup_family, ResolveCb callback); + void chargeGetAddrInfoErrorStats(DNSServiceErrorType error_code); struct PendingResolution : public ActiveDnsQuery { diff --git a/source/common/network/base_listener_impl.cc b/source/common/network/base_listener_impl.cc index df953ecd29e3d..aa726efccc315 100644 --- a/source/common/network/base_listener_impl.cc +++ b/source/common/network/base_listener_impl.cc @@ -17,12 +17,12 @@ namespace Network { BaseListenerImpl::BaseListenerImpl(Event::DispatcherImpl& dispatcher, SocketSharedPtr socket) : local_address_(nullptr), dispatcher_(dispatcher), socket_(std::move(socket)) { - const auto ip = socket_->addressProvider().localAddress()->ip(); + const auto ip = socket_->connectionInfoProvider().localAddress()->ip(); // Only use the listen socket's local address for new connections if it is not the all hosts // address (e.g., 0.0.0.0 for IPv4). 
if (!(ip && ip->isAnyAddress())) { - local_address_ = socket_->addressProvider().localAddress(); + local_address_ = socket_->connectionInfoProvider().localAddress(); } } diff --git a/source/common/network/connection_impl.cc b/source/common/network/connection_impl.cc index 5325dab742648..b5d3472ad8f57 100644 --- a/source/common/network/connection_impl.cc +++ b/source/common/network/connection_impl.cc @@ -98,7 +98,8 @@ ConnectionImpl::ConnectionImpl(Event::Dispatcher& dispatcher, ConnectionSocketPt // TODO(soulxu): generate the connection id inside the addressProvider directly, // then we don't need a setter or any of the optional stuff. - socket_->addressProvider().setConnectionID(id()); + socket_->connectionInfoProvider().setConnectionID(id()); + socket_->connectionInfoProvider().setSslConnection(transport_socket_->ssl()); } ConnectionImpl::~ConnectionImpl() { @@ -278,8 +279,8 @@ void ConnectionImpl::noDelay(bool enable) { return; } - // Don't set NODELAY for unix domain sockets - if (socket_->addressType() == Address::Type::Pipe) { + // Don't set NODELAY for unix domain sockets or internal socket. + if (socket_->addressType() != Address::Type::Ip) { return; } @@ -581,7 +582,7 @@ void ConnectionImpl::onFileEvent(uint32_t events) { } // It's possible for a write event callback to close the socket (which will cause fd_ to be -1). - // In this case ignore write event processing. + // In this case ignore read event processing. 
if (ioHandle().isOpen() && (events & Event::FileReadyType::Read)) { onReadReady(); } @@ -677,7 +678,8 @@ void ConnectionImpl::onWriteReady() { return; } } else { - ENVOY_CONN_LOG(debug, "delayed connection error: {}", *this, error); + failure_reason_ = absl::StrCat("delayed connect error: ", error); + ENVOY_CONN_LOG(debug, "{}", *this, failure_reason_); closeSocket(ConnectionEvent::RemoteClose); return; } @@ -760,6 +762,13 @@ bool ConnectionImpl::bothSidesHalfClosed() { } absl::string_view ConnectionImpl::transportFailureReason() const { + // TODO(mattklein123): Is it possible for there to be a local failure reason and a transport + // failure reason? If so we may need to combine these somehow. + ENVOY_BUG(!(!failure_reason_.empty() && !transport_socket_->failureReason().empty()), + "both connection and transport failure reason are not empty"); + if (!failure_reason_.empty()) { + return failure_reason_; + } return transport_socket_->failureReason(); } @@ -788,10 +797,13 @@ ServerConnectionImpl::ServerConnectionImpl(Event::Dispatcher& dispatcher, : ConnectionImpl(dispatcher, std::move(socket), std::move(transport_socket), stream_info, connected) {} -void ServerConnectionImpl::setTransportSocketConnectTimeout(std::chrono::milliseconds timeout) { +void ServerConnectionImpl::setTransportSocketConnectTimeout(std::chrono::milliseconds timeout, + Stats::Counter& timeout_stat) { if (!transport_connect_pending_) { return; } + + transport_socket_timeout_stat_ = &timeout_stat; if (transport_socket_connect_timer_ == nullptr) { transport_socket_connect_timer_ = dispatcher_.createScaledTimer(Event::ScaledTimerType::TransportSocketConnectTimeout, @@ -814,6 +826,7 @@ void ServerConnectionImpl::raiseEvent(ConnectionEvent event) { void ServerConnectionImpl::onTransportSocketConnectTimeout() { stream_info_.setConnectionTerminationDetails(kTransportSocketConnectTimeoutTerminationDetails); closeConnectionImmediately(); + transport_socket_timeout_stat_->inc(); } 
ClientConnectionImpl::ClientConnectionImpl( @@ -821,12 +834,21 @@ ClientConnectionImpl::ClientConnectionImpl( const Network::Address::InstanceConstSharedPtr& source_address, Network::TransportSocketPtr&& transport_socket, const Network::ConnectionSocket::OptionsSharedPtr& options) - : ConnectionImpl(dispatcher, std::make_unique(remote_address, options), - std::move(transport_socket), stream_info_, false), - stream_info_(dispatcher.timeSource(), socket_->addressProviderSharedPtr()) { + : ClientConnectionImpl(dispatcher, std::make_unique(remote_address, options), + source_address, std::move(transport_socket), options) {} + +ClientConnectionImpl::ClientConnectionImpl( + Event::Dispatcher& dispatcher, std::unique_ptr socket, + const Address::InstanceConstSharedPtr& source_address, + Network::TransportSocketPtr&& transport_socket, + const Network::ConnectionSocket::OptionsSharedPtr& options) + : ConnectionImpl(dispatcher, std::move(socket), std::move(transport_socket), stream_info_, + false), + stream_info_(dispatcher.timeSource(), socket_->connectionInfoProviderSharedPtr()) { + // There are no meaningful socket options or source address semantics for // non-IP sockets, so skip. - if (remote_address->ip() == nullptr) { + if (socket_->connectionInfoProviderSharedPtr()->remoteAddress()->ip() == nullptr) { return; } if (!Network::Socket::applyOptions(options, *socket_, @@ -841,16 +863,16 @@ ClientConnectionImpl::ClientConnectionImpl( const Network::Address::InstanceConstSharedPtr* source = &source_address; - if (socket_->addressProvider().localAddress()) { - source = &socket_->addressProvider().localAddress(); + if (socket_->connectionInfoProvider().localAddress()) { + source = &socket_->connectionInfoProvider().localAddress(); } if (*source != nullptr) { Api::SysCallIntResult result = socket_->bind(*source); if (result.return_value_ < 0) { - // TODO(lizan): consider add this error into transportFailureReason. - ENVOY_LOG_MISC(debug, "Bind failure. 
Failed to bind to {}: {}", source->get()->asString(), - errorDetails(result.errno_)); + failure_reason_ = absl::StrCat("failed to bind to ", source->get()->asString(), ": ", + errorDetails(result.errno_)); + ENVOY_LOG_MISC(debug, failure_reason_); bind_error_ = true; // Set a special error state to ensure asynchronous close to give the owner of the // ConnectionImpl a chance to add callbacks and detect the "disconnect". @@ -864,8 +886,9 @@ ClientConnectionImpl::ClientConnectionImpl( void ClientConnectionImpl::connect() { ENVOY_CONN_LOG(debug, "connecting to {}", *this, - socket_->addressProvider().remoteAddress()->asString()); - const Api::SysCallIntResult result = socket_->connect(socket_->addressProvider().remoteAddress()); + socket_->connectionInfoProvider().remoteAddress()->asString()); + const Api::SysCallIntResult result = + socket_->connect(socket_->connectionInfoProvider().remoteAddress()); if (result.return_value_ == 0) { // write will become ready. ASSERT(connecting_); @@ -886,7 +909,8 @@ void ClientConnectionImpl::connect() { } else { immediate_error_event_ = ConnectionEvent::RemoteClose; connecting_ = false; - ENVOY_CONN_LOG(debug, "immediate connection error: {}", *this, result.errno_); + failure_reason_ = absl::StrCat("immediate connect error: ", result.errno_); + ENVOY_CONN_LOG(debug, "{}", *this, failure_reason_); // Trigger a write event. This is needed on macOS and seems harmless on Linux. ioHandle().activateFileEvents(Event::FileReadyType::Write); diff --git a/source/common/network/connection_impl.h b/source/common/network/connection_impl.h index 4e9f0044924c1..e20119ffb095a 100644 --- a/source/common/network/connection_impl.h +++ b/source/common/network/connection_impl.h @@ -22,6 +22,8 @@ class TestPauseFilter; namespace Network { +class HappyEyeballsConnectionImpl; + /** * Utility functions for the connection implementation. 
*/ @@ -70,11 +72,11 @@ class ConnectionImpl : public ConnectionImplBase, public TransportSocketCallback void readDisable(bool disable) override; void detectEarlyCloseWhenReadDisabled(bool value) override { detect_early_close_ = value; } bool readEnabled() const override; - const SocketAddressProvider& addressProvider() const override { - return socket_->addressProvider(); + const ConnectionInfoProvider& connectionInfoProvider() const override { + return socket_->connectionInfoProvider(); } - SocketAddressProviderSharedPtr addressProviderSharedPtr() const override { - return socket_->addressProviderSharedPtr(); + ConnectionInfoProviderSharedPtr connectionInfoProviderSharedPtr() const override { + return socket_->connectionInfoProviderSharedPtr(); } absl::optional unixSocketPeerCredentials() const override; Ssl::ConnectionInfoConstSharedPtr ssl() const override { return transport_socket_->ssl(); } @@ -166,8 +168,10 @@ class ConnectionImpl : public ConnectionImplBase, public TransportSocketCallback bool connecting_{false}; ConnectionEvent immediate_error_event_{ConnectionEvent::Connected}; bool bind_error_{false}; + std::string failure_reason_; private: + friend class HappyEyeballsConnectionImpl; friend class Envoy::RandomPauseFilter; friend class Envoy::TestPauseFilter; @@ -217,7 +221,8 @@ class ServerConnectionImpl : public ConnectionImpl, virtual public ServerConnect bool connected); // ServerConnection impl - void setTransportSocketConnectTimeout(std::chrono::milliseconds timeout) override; + void setTransportSocketConnectTimeout(std::chrono::milliseconds timeout, + Stats::Counter& timeout_stat) override; void raiseEvent(ConnectionEvent event) override; private: @@ -227,6 +232,7 @@ class ServerConnectionImpl : public ConnectionImpl, virtual public ServerConnect // Implements a timeout for the transport socket signaling connection. The timer is enabled by a // call to setTransportSocketConnectTimeout and is reset when the connection is established. 
Event::TimerPtr transport_socket_connect_timer_; + Stats::Counter* transport_socket_timeout_stat_; }; /** @@ -239,6 +245,10 @@ class ClientConnectionImpl : public ConnectionImpl, virtual public ClientConnect const Address::InstanceConstSharedPtr& source_address, Network::TransportSocketPtr&& transport_socket, const Network::ConnectionSocket::OptionsSharedPtr& options); + ClientConnectionImpl(Event::Dispatcher& dispatcher, std::unique_ptr socket, + const Address::InstanceConstSharedPtr& source_address, + Network::TransportSocketPtr&& transport_socket, + const Network::ConnectionSocket::OptionsSharedPtr& options); // Network::ClientConnection void connect() override; diff --git a/source/common/network/dns_impl.cc b/source/common/network/dns_impl.cc index 341f5400c3ccd..0f093edb405bb 100644 --- a/source/common/network/dns_impl.cc +++ b/source/common/network/dns_impl.cc @@ -105,6 +105,9 @@ void DnsResolverImpl::PendingResolution::onAresGetAddrInfoCallback(int status, i // callback_ target _should_ still be around. In that case, raise the callback_ so the target // can be done with this query and initiate a new one. if (!cancelled_) { + ENVOY_LOG_EVENT(debug, "cares_dns_resolution_destroyed", "dns resolution for {} destroyed", + dns_name_); + callback_(ResolutionStatus::Failure, {}); } delete this; @@ -180,6 +183,10 @@ void DnsResolverImpl::PendingResolution::onAresGetAddrInfoCallback(int status, i // portFromTcpUrl(). // TODO(chaoqin-li1123): remove try catch pattern here once we figure how to handle unexpected // exception in fuzz tests. 
+ ENVOY_LOG_EVENT(debug, "cares_dns_resolution_complete", + "dns resolution for {} completed with status {}", dns_name_, + resolution_status); + TRY_NEEDS_AUDIT { callback_(resolution_status, std::move(address_list)); } catch (const EnvoyException& e) { ENVOY_LOG(critical, "EnvoyException in c-ares callback: {}", e.what()); @@ -253,6 +260,8 @@ void DnsResolverImpl::onAresSocketStateChange(os_fd_t fd, int read, int write) { ActiveDnsQuery* DnsResolverImpl::resolve(const std::string& dns_name, DnsLookupFamily dns_lookup_family, ResolveCb callback) { + ENVOY_LOG_EVENT(debug, "cares_dns_resolution_start", "dns resolution for {} started", dns_name); + // TODO(hennna): Add DNS caching which will allow testing the edge case of a // failed initial call to getAddrInfo followed by a synchronous IPv4 // resolution. diff --git a/source/common/network/dns_impl.h b/source/common/network/dns_impl.h index 5f96db4c47e9e..abcea92a4f885 100644 --- a/source/common/network/dns_impl.h +++ b/source/common/network/dns_impl.h @@ -24,7 +24,7 @@ class DnsResolverImplPeer; * Implementation of DnsResolver that uses c-ares. All calls and callbacks are assumed to * happen on the thread that owns the creating dispatcher. 
*/ -class DnsResolverImpl : public DnsResolver, protected Logger::Loggable { +class DnsResolverImpl : public DnsResolver, protected Logger::Loggable { public: DnsResolverImpl(Event::Dispatcher& dispatcher, const std::vector& resolvers, @@ -107,7 +107,7 @@ class DnsResolverImpl : public DnsResolver, protected Logger::Loggable events_; const absl::optional resolvers_csv_; diff --git a/source/common/network/filter_matcher.h b/source/common/network/filter_matcher.h index d1178a3301631..14313bf63c7b6 100644 --- a/source/common/network/filter_matcher.h +++ b/source/common/network/filter_matcher.h @@ -45,7 +45,7 @@ class ListenerFilterDstPortMatcher final : public ListenerFilterMatcher { explicit ListenerFilterDstPortMatcher(const ::envoy::type::v3::Int32Range& range) : start_(range.start()), end_(range.end()) {} bool matches(ListenerFilterCallbacks& cb) const override { - const auto& address = cb.socket().addressProvider().localAddress(); + const auto& address = cb.socket().connectionInfoProvider().localAddress(); // Match on destination port (only for IP addresses). 
if (address->type() == Address::Type::Ip) { const auto port = address->ip()->port(); diff --git a/source/common/network/happy_eyeballs_connection_impl.cc b/source/common/network/happy_eyeballs_connection_impl.cc new file mode 100644 index 0000000000000..f64f1252142bb --- /dev/null +++ b/source/common/network/happy_eyeballs_connection_impl.cc @@ -0,0 +1,567 @@ +#include "source/common/network/happy_eyeballs_connection_impl.h" + +#include + +namespace Envoy { +namespace Network { + +HappyEyeballsConnectionImpl::HappyEyeballsConnectionImpl( + Event::Dispatcher& dispatcher, const std::vector& address_list, + Address::InstanceConstSharedPtr source_address, TransportSocketFactory& socket_factory, + TransportSocketOptionsConstSharedPtr transport_socket_options, + const ConnectionSocket::OptionsSharedPtr options) + : id_(ConnectionImpl::next_global_id_++), dispatcher_(dispatcher), address_list_(address_list), + connection_construction_state_( + {source_address, socket_factory, transport_socket_options, options}), + next_attempt_timer_(dispatcher_.createTimer([this]() -> void { tryAnotherConnection(); })) { + connections_.push_back(createNextConnection()); +} + +HappyEyeballsConnectionImpl::~HappyEyeballsConnectionImpl() = default; + +void HappyEyeballsConnectionImpl::connect() { + ENVOY_BUG(!connect_finished_, "connection already connected"); + connections_[0]->connect(); + maybeScheduleNextAttempt(); +} + +void HappyEyeballsConnectionImpl::addWriteFilter(WriteFilterSharedPtr filter) { + if (connect_finished_) { + connections_[0]->addWriteFilter(filter); + return; + } + // Filters should only be notified of events on the final connection, so defer adding + // filters until the final connection has been determined. 
+ post_connect_state_.write_filters_.push_back(filter); +} + +void HappyEyeballsConnectionImpl::addFilter(FilterSharedPtr filter) { + if (connect_finished_) { + connections_[0]->addFilter(filter); + return; + } + // Filters should only be notified of events on the final connection, so defer adding + // filters until the final connection has been determined. + post_connect_state_.filters_.push_back(filter); +} + +void HappyEyeballsConnectionImpl::addReadFilter(ReadFilterSharedPtr filter) { + if (connect_finished_) { + connections_[0]->addReadFilter(filter); + return; + } + // Filters should only be notified of events on the final connection, so defer adding + // filters until the final connection has been determined. + post_connect_state_.read_filters_.push_back(filter); +} + +void HappyEyeballsConnectionImpl::removeReadFilter(ReadFilterSharedPtr filter) { + if (connect_finished_) { + connections_[0]->removeReadFilter(filter); + return; + } + // Filters should only be notified of events on the final connection, so remove + // the filters from the list of deferred filters. + auto i = post_connect_state_.read_filters_.begin(); + while (i != post_connect_state_.read_filters_.end()) { + if (*i == filter) { + post_connect_state_.read_filters_.erase(i); + return; + } + } + NOT_REACHED_GCOVR_EXCL_LINE; +} + +bool HappyEyeballsConnectionImpl::initializeReadFilters() { + if (connect_finished_) { + return connections_[0]->initializeReadFilters(); + } + // Filters should only be notified of events on the final connection, so defer + // initialization of the filters until the final connection has been determined. 
+ if (post_connect_state_.read_filters_.empty()) { + return false; + } + post_connect_state_.initialize_read_filters_ = true; + return true; +} + +void HappyEyeballsConnectionImpl::addBytesSentCallback(Connection::BytesSentCb cb) { + if (connect_finished_) { + connections_[0]->addBytesSentCallback(cb); + return; + } + // Callbacks should only be notified of events on the final connection, so defer adding + // callbacks until the final connection has been determined. + post_connect_state_.bytes_sent_callbacks_.push_back(cb); +} + +void HappyEyeballsConnectionImpl::enableHalfClose(bool enabled) { + if (!connect_finished_) { + per_connection_state_.enable_half_close_ = enabled; + } + for (auto& connection : connections_) { + connection->enableHalfClose(enabled); + } +} + +bool HappyEyeballsConnectionImpl::isHalfCloseEnabled() { + return connections_[0]->isHalfCloseEnabled(); +} + +std::string HappyEyeballsConnectionImpl::nextProtocol() const { + return connections_[0]->nextProtocol(); +} + +void HappyEyeballsConnectionImpl::noDelay(bool enable) { + if (!connect_finished_) { + per_connection_state_.no_delay_ = enable; + } + for (auto& connection : connections_) { + connection->noDelay(enable); + } +} + +void HappyEyeballsConnectionImpl::readDisable(bool disable) { + if (connect_finished_) { + connections_[0]->readDisable(disable); + return; + } + if (!post_connect_state_.read_disable_count_.has_value()) { + post_connect_state_.read_disable_count_ = 0; + } + + if (disable) { + post_connect_state_.read_disable_count_.value()++; + } else { + ASSERT(post_connect_state_.read_disable_count_ != 0); + post_connect_state_.read_disable_count_.value()--; + } +} + +void HappyEyeballsConnectionImpl::detectEarlyCloseWhenReadDisabled(bool value) { + if (!connect_finished_) { + per_connection_state_.detect_early_close_when_read_disabled_ = value; + } + for (auto& connection : connections_) { + connection->detectEarlyCloseWhenReadDisabled(value); + } +} + +bool 
HappyEyeballsConnectionImpl::readEnabled() const { + if (!connect_finished_) { + return !post_connect_state_.read_disable_count_.has_value() || + post_connect_state_.read_disable_count_ == 0; + } + return connections_[0]->readEnabled(); +} + +const ConnectionInfoProvider& HappyEyeballsConnectionImpl::connectionInfoProvider() const { + return connections_[0]->connectionInfoProvider(); +} + +ConnectionInfoProviderSharedPtr +HappyEyeballsConnectionImpl::connectionInfoProviderSharedPtr() const { + return connections_[0]->connectionInfoProviderSharedPtr(); +} + +absl::optional +HappyEyeballsConnectionImpl::unixSocketPeerCredentials() const { + return connections_[0]->unixSocketPeerCredentials(); +} + +Ssl::ConnectionInfoConstSharedPtr HappyEyeballsConnectionImpl::ssl() const { + return connections_[0]->ssl(); +} + +Connection::State HappyEyeballsConnectionImpl::state() const { + if (!connect_finished_) { + ASSERT(connections_[0]->state() == Connection::State::Open); + } + return connections_[0]->state(); +} + +bool HappyEyeballsConnectionImpl::connecting() const { + ASSERT(connect_finished_ || connections_[0]->connecting()); + return connections_[0]->connecting(); +} + +void HappyEyeballsConnectionImpl::write(Buffer::Instance& data, bool end_stream) { + if (connect_finished_) { + connections_[0]->write(data, end_stream); + return; + } + + // Data should only be written on the final connection, so defer actually writing + // until the final connection has been determined. + if (!post_connect_state_.write_buffer_.has_value()) { + post_connect_state_.end_stream_ = false; + post_connect_state_.write_buffer_ = dispatcher_.getWatermarkFactory().createBuffer( + [this]() -> void { this->onWriteBufferLowWatermark(); }, + [this]() -> void { this->onWriteBufferHighWatermark(); }, + // ConnectionCallbacks do not have a method to receive overflow watermark + // notification. So this class, like ConnectionImpl, has a no-op handler. 
+ []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }); + if (per_connection_state_.buffer_limits_.has_value()) { + post_connect_state_.write_buffer_.value()->setWatermarks( + per_connection_state_.buffer_limits_.value()); + } + } + + post_connect_state_.write_buffer_.value()->move(data); + ASSERT(!post_connect_state_.end_stream_.value()); // Don't write after end_stream. + post_connect_state_.end_stream_ = end_stream; +} + +void HappyEyeballsConnectionImpl::setBufferLimits(uint32_t limit) { + if (!connect_finished_) { + ASSERT(!per_connection_state_.buffer_limits_.has_value()); + per_connection_state_.buffer_limits_ = limit; + if (post_connect_state_.write_buffer_.has_value()) { + post_connect_state_.write_buffer_.value()->setWatermarks(limit); + } + } + for (auto& connection : connections_) { + connection->setBufferLimits(limit); + } +} + +uint32_t HappyEyeballsConnectionImpl::bufferLimit() const { return connections_[0]->bufferLimit(); } + +bool HappyEyeballsConnectionImpl::aboveHighWatermark() const { + if (!connect_finished_) { + // Writes are deferred, so return the watermark status from the deferred write buffer. + return post_connect_state_.write_buffer_.has_value() && + post_connect_state_.write_buffer_.value()->highWatermarkTriggered(); + } + + return connections_[0]->aboveHighWatermark(); +} + +const ConnectionSocket::OptionsSharedPtr& HappyEyeballsConnectionImpl::socketOptions() const { + // Note, this might change before connect finishes. + return connections_[0]->socketOptions(); +} + +absl::string_view HappyEyeballsConnectionImpl::requestedServerName() const { + // Note, this might change before connect finishes. + return connections_[0]->requestedServerName(); +} + +StreamInfo::StreamInfo& HappyEyeballsConnectionImpl::streamInfo() { + // Note, this might change before connect finishes. 
+ return connections_[0]->streamInfo(); +} + +const StreamInfo::StreamInfo& HappyEyeballsConnectionImpl::streamInfo() const { + // Note, this might change before connect finishes. + return connections_[0]->streamInfo(); +} + +absl::string_view HappyEyeballsConnectionImpl::transportFailureReason() const { + // Note, this might change before connect finishes. + return connections_[0]->transportFailureReason(); +} + +bool HappyEyeballsConnectionImpl::startSecureTransport() { + if (!connect_finished_) { + per_connection_state_.start_secure_transport_ = true; + } + bool ret = true; + for (auto& connection : connections_) { + if (!connection->startSecureTransport()) { + ret = false; + } + } + return ret; +} + +absl::optional HappyEyeballsConnectionImpl::lastRoundTripTime() const { + // Note, this might change before connect finishes. + return connections_[0]->lastRoundTripTime(); +} + +void HappyEyeballsConnectionImpl::addConnectionCallbacks(ConnectionCallbacks& cb) { + if (connect_finished_) { + connections_[0]->addConnectionCallbacks(cb); + return; + } + // Callbacks should only be notified of events on the final connection, so defer adding + // callbacks until the final connection has been determined. + post_connect_state_.connection_callbacks_.push_back(&cb); +} + +void HappyEyeballsConnectionImpl::removeConnectionCallbacks(ConnectionCallbacks& cb) { + if (connect_finished_) { + connections_[0]->removeConnectionCallbacks(cb); + return; + } + // Callbacks should only be notified of events on the final connection, so remove + // the callback from the list of deferred callbacks. 
+ auto i = post_connect_state_.connection_callbacks_.begin(); + while (i != post_connect_state_.connection_callbacks_.end()) { + if (*i == &cb) { + post_connect_state_.connection_callbacks_.erase(i); + return; + } + } + NOT_REACHED_GCOVR_EXCL_LINE; +} + +void HappyEyeballsConnectionImpl::close(ConnectionCloseType type) { + if (connect_finished_) { + connections_[0]->close(type); + return; + } + + connect_finished_ = true; + next_attempt_timer_->disableTimer(); + for (size_t i = 0; i < connections_.size(); ++i) { + connections_[i]->removeConnectionCallbacks(*callbacks_wrappers_[i]); + if (i != 0) { + // Wait to close the final connection until the post-connection callbacks + // have been added. + connections_[i]->close(ConnectionCloseType::NoFlush); + } + } + connections_.resize(1); + callbacks_wrappers_.clear(); + + for (auto cb : post_connect_state_.connection_callbacks_) { + if (cb) { + connections_[0]->addConnectionCallbacks(*cb); + } + } + connections_[0]->close(type); +} + +Event::Dispatcher& HappyEyeballsConnectionImpl::dispatcher() { + ASSERT(&dispatcher_ == &connections_[0]->dispatcher()); + return connections_[0]->dispatcher(); +} + +uint64_t HappyEyeballsConnectionImpl::id() const { return id_; } + +void HappyEyeballsConnectionImpl::hashKey(std::vector& hash_key) const { + // Pack the id into sizeof(id_) uint8_t entries in the hash_key vector. 
+ hash_key.reserve(hash_key.size() + sizeof(id_)); + for (unsigned i = 0; i < sizeof(id_); ++i) { + hash_key.push_back(0xFF & (id_ >> (8 * i))); + } +} + +void HappyEyeballsConnectionImpl::setConnectionStats(const ConnectionStats& stats) { + if (!connect_finished_) { + per_connection_state_.connection_stats_ = stats; + } + for (auto& connection : connections_) { + connection->setConnectionStats(stats); + } +} + +void HappyEyeballsConnectionImpl::setDelayedCloseTimeout(std::chrono::milliseconds timeout) { + if (!connect_finished_) { + per_connection_state_.delayed_close_timeout_ = timeout; + } + for (auto& connection : connections_) { + connection->setDelayedCloseTimeout(timeout); + } +} + +void HappyEyeballsConnectionImpl::dumpState(std::ostream& os, int indent_level) const { + const char* spaces = spacesForLevel(indent_level); + os << spaces << "HappyEyeballsConnectionImpl " << this << DUMP_MEMBER(id_) + << DUMP_MEMBER(connect_finished_) << "\n"; + + for (auto& connection : connections_) { + DUMP_DETAILS(connection); + } +} + +ClientConnectionPtr HappyEyeballsConnectionImpl::createNextConnection() { + ASSERT(next_address_ < address_list_.size()); + auto connection = dispatcher_.createClientConnection( + address_list_[next_address_++], connection_construction_state_.source_address_, + connection_construction_state_.socket_factory_.createTransportSocket( + connection_construction_state_.transport_socket_options_), + connection_construction_state_.options_); + callbacks_wrappers_.push_back(std::make_unique(*this, *connection)); + connection->addConnectionCallbacks(*callbacks_wrappers_.back()); + + if (per_connection_state_.detect_early_close_when_read_disabled_.has_value()) { + connection->detectEarlyCloseWhenReadDisabled( + per_connection_state_.detect_early_close_when_read_disabled_.value()); + } + if (per_connection_state_.no_delay_.has_value()) { + connection->noDelay(per_connection_state_.no_delay_.value()); + } + if 
(per_connection_state_.connection_stats_.has_value()) { + connection->setConnectionStats(*per_connection_state_.connection_stats_); + } + if (per_connection_state_.buffer_limits_.has_value()) { + connection->setBufferLimits(per_connection_state_.buffer_limits_.value()); + } + if (per_connection_state_.enable_half_close_.has_value()) { + connection->enableHalfClose(per_connection_state_.enable_half_close_.value()); + } + if (per_connection_state_.delayed_close_timeout_.has_value()) { + connection->setDelayedCloseTimeout(per_connection_state_.delayed_close_timeout_.value()); + } + if (per_connection_state_.start_secure_transport_.has_value()) { + ASSERT(per_connection_state_.start_secure_transport_); + connection->startSecureTransport(); + } + + return connection; +} + +void HappyEyeballsConnectionImpl::tryAnotherConnection() { + connections_.push_back(createNextConnection()); + connections_.back()->connect(); + maybeScheduleNextAttempt(); +} + +void HappyEyeballsConnectionImpl::maybeScheduleNextAttempt() { + if (next_address_ >= address_list_.size()) { + return; + } + next_attempt_timer_->enableTimer(std::chrono::milliseconds(300)); +} + +void HappyEyeballsConnectionImpl::onEvent(ConnectionEvent event, + ConnectionCallbacksWrapper* wrapper) { + if (event != ConnectionEvent::Connected) { + // This connection attempt has failed. If possible, start another connection attempt + // immediately, instead of waiting for the timer. + if (next_address_ < address_list_.size()) { + next_attempt_timer_->disableTimer(); + tryAnotherConnection(); + } + // If there is at least one more attempt running then the current attempt can be destroyed. + if (connections_.size() > 1) { + // Nuke this connection and associated callbacks and let a subsequent attempt proceed. 
+ cleanupWrapperAndConnection(wrapper); + return; + } + ASSERT(connections_.size() == 1); + // This connection attempt failed but there are no more attempts to be made, so pass + // the failure up by setting up this connection as the final one. + } + + // Close all other connections and configure the final connection. + setUpFinalConnection(event, wrapper); +} + +void HappyEyeballsConnectionImpl::setUpFinalConnection(ConnectionEvent event, + ConnectionCallbacksWrapper* wrapper) { + connect_finished_ = true; + next_attempt_timer_->disableTimer(); + + // Remove the proxied connection callbacks from all connections. + for (auto& w : callbacks_wrappers_) { + w->connection().removeConnectionCallbacks(*w); + } + + // Close and delete any other connections. + auto it = connections_.begin(); + while (it != connections_.end()) { + if (it->get() != &(wrapper->connection())) { + (*it)->close(ConnectionCloseType::NoFlush); + it = connections_.erase(it); + } else { + ++it; + } + } + ASSERT(connections_.size() == 1); + callbacks_wrappers_.clear(); + + // Apply post-connect state to the final socket. + for (const auto& cb : post_connect_state_.bytes_sent_callbacks_) { + connections_[0]->addBytesSentCallback(cb); + } + + if (event == ConnectionEvent::Connected) { + // Apply post-connect state which is only connections which have succeeded. + for (auto& filter : post_connect_state_.filters_) { + connections_[0]->addFilter(filter); + } + for (auto& filter : post_connect_state_.write_filters_) { + connections_[0]->addWriteFilter(filter); + } + for (auto& filter : post_connect_state_.read_filters_) { + connections_[0]->addReadFilter(filter); + } + if (post_connect_state_.initialize_read_filters_.has_value() && + post_connect_state_.initialize_read_filters_.value()) { + // initialize_read_filters_ is set to true in initializeReadFilters() only when + // there are read filters installed. 
The underlying connection's initializeReadFilters() + // will always return true when read filters are installed so this should always + // return true. + ASSERT(!post_connect_state_.read_filters_.empty()); + bool initialized = connections_[0]->initializeReadFilters(); + ASSERT(initialized); + } + if (post_connect_state_.read_disable_count_.has_value()) { + for (int i = 0; i < post_connect_state_.read_disable_count_.value(); ++i) { + connections_[0]->readDisable(true); + } + } + + if (post_connect_state_.write_buffer_.has_value()) { + // write_buffer_ and end_stream_ are both set together in write(). + ASSERT(post_connect_state_.end_stream_.has_value()); + // If a buffer limit was set, ensure that it was applied to the connection. + if (per_connection_state_.buffer_limits_.has_value()) { + ASSERT(connections_[0]->bufferLimit() == per_connection_state_.buffer_limits_.value()); + } + connections_[0]->write(*post_connect_state_.write_buffer_.value(), + post_connect_state_.end_stream_.value()); + } + } + + // Add connection callbacks after moving data from the deferred write buffer so that + // any high watermark notification is swallowed and not conveyed to the callbacks, since + // that was already delivered to the callbacks when the data was written to the buffer. 
+ for (auto cb : post_connect_state_.connection_callbacks_) { + if (cb) { + connections_[0]->addConnectionCallbacks(*cb); + } + } +} + +void HappyEyeballsConnectionImpl::cleanupWrapperAndConnection(ConnectionCallbacksWrapper* wrapper) { + wrapper->connection().removeConnectionCallbacks(*wrapper); + for (auto it = connections_.begin(); it != connections_.end();) { + if (it->get() == &(wrapper->connection())) { + (*it)->close(ConnectionCloseType::NoFlush); + it = connections_.erase(it); + } else { + ++it; + } + } + + for (auto it = callbacks_wrappers_.begin(); it != callbacks_wrappers_.end();) { + if (it->get() == wrapper) { + it = callbacks_wrappers_.erase(it); + } else { + ++it; + } + } +} + +void HappyEyeballsConnectionImpl::onWriteBufferLowWatermark() { + // Only called when moving write data from the deferred write buffer to + // the underlying connection. In this case, the connection callbacks must + // not be notified since this should be transparent to the callbacks. +} + +void HappyEyeballsConnectionImpl::onWriteBufferHighWatermark() { + ASSERT(!connect_finished_); + for (auto callback : post_connect_state_.connection_callbacks_) { + if (callback) { + callback->onAboveWriteBufferHighWatermark(); + } + } +} + +} // namespace Network +} // namespace Envoy diff --git a/source/common/network/happy_eyeballs_connection_impl.h b/source/common/network/happy_eyeballs_connection_impl.h new file mode 100644 index 0000000000000..5f24c659700ed --- /dev/null +++ b/source/common/network/happy_eyeballs_connection_impl.h @@ -0,0 +1,216 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include "envoy/common/scope_tracker.h" +#include "envoy/network/connection.h" +#include "envoy/network/transport_socket.h" + +#include "source/common/network/connection_impl.h" + +#include "absl/types/optional.h" + +namespace Envoy { +namespace Network { + +/** + * Implementation of ClientConnection which transparently attempts connections to + * multiple different IP 
addresses, and uses the first connection that succeeds. + * After a connection is established, all methods simply delegate to the + * underlying connection. However, before the connection is established + * their behavior depends on their semantics. For anything which can result + * in up-call (e.g. filter registration) or which must only happen once (e.g. + * writing data) the context is saved in until the connection completes, at + * which point they are replayed to the underlying connection. For simple methods + * they are applied to each open connection and applied when creating new ones. + * + * See the Happy Eyeballs RFC at https://datatracker.ietf.org/doc/html/rfc6555 + * TODO(RyanTheOptimist): Implement the Happy Eyeballs address sorting algorithm + * either in the class or in the resolution code. + */ +class HappyEyeballsConnectionImpl : public ClientConnection { +public: + HappyEyeballsConnectionImpl(Event::Dispatcher& dispatcher, + const std::vector& address_list, + Address::InstanceConstSharedPtr source_address, + TransportSocketFactory& socket_factory, + TransportSocketOptionsConstSharedPtr transport_socket_options, + const ConnectionSocket::OptionsSharedPtr options); + + ~HappyEyeballsConnectionImpl() override; + + // Network::ClientConnection + void connect() override; + + // Methods which defer action until the final connection has been determined. + void addWriteFilter(WriteFilterSharedPtr filter) override; + void addFilter(FilterSharedPtr filter) override; + void addReadFilter(ReadFilterSharedPtr filter) override; + void removeReadFilter(ReadFilterSharedPtr filter) override; + bool initializeReadFilters() override; + void addBytesSentCallback(BytesSentCb cb) override; + void write(Buffer::Instance& data, bool end_stream) override; + void addConnectionCallbacks(ConnectionCallbacks& cb) override; + void removeConnectionCallbacks(ConnectionCallbacks& cb) override; + + // Methods which are applied to each connection attempt. 
+ void enableHalfClose(bool enabled) override; + void noDelay(bool enable) override; + void readDisable(bool disable) override; + void detectEarlyCloseWhenReadDisabled(bool value) override; + void setConnectionStats(const ConnectionStats& stats) override; + void setDelayedCloseTimeout(std::chrono::milliseconds timeout) override; + void setBufferLimits(uint32_t limit) override; + bool startSecureTransport() override; + absl::optional lastRoundTripTime() const override; + + // Simple getters which always delegate to the first connection in connections_. + bool isHalfCloseEnabled() override; + std::string nextProtocol() const override; + // Note, this might change before connect finishes. + const ConnectionInfoProvider& connectionInfoProvider() const override; + // Note, this might change before connect finishes. + ConnectionInfoProviderSharedPtr connectionInfoProviderSharedPtr() const override; + // Note, this might change before connect finishes. + absl::optional unixSocketPeerCredentials() const override; + // Note, this might change before connect finishes. + Ssl::ConnectionInfoConstSharedPtr ssl() const override; + State state() const override; + bool connecting() const override; + uint32_t bufferLimit() const override; + const ConnectionSocket::OptionsSharedPtr& socketOptions() const override; + absl::string_view requestedServerName() const override; + StreamInfo::StreamInfo& streamInfo() override; + const StreamInfo::StreamInfo& streamInfo() const override; + absl::string_view transportFailureReason() const override; + + // Methods implemented largely by this class itself. 
+ uint64_t id() const override; + Event::Dispatcher& dispatcher() override; + void close(ConnectionCloseType type) override; + bool readEnabled() const override; + bool aboveHighWatermark() const override; + void hashKey(std::vector& hash_key) const override; + void dumpState(std::ostream& os, int indent_level) const override; + +private: + // ConnectionCallbacks which will be set on an ClientConnection which + // sends connection events back to the HappyEyeballsConnectionImpl. + class ConnectionCallbacksWrapper : public ConnectionCallbacks { + public: + ConnectionCallbacksWrapper(HappyEyeballsConnectionImpl& parent, ClientConnection& connection) + : parent_(parent), connection_(connection) {} + + void onEvent(ConnectionEvent event) override { parent_.onEvent(event, this); } + + void onAboveWriteBufferHighWatermark() override { + // No data will be written to the connection while the wrapper is associated with it, + // so the write buffer should never hit the high watermark. + NOT_REACHED_GCOVR_EXCL_LINE; + } + + void onBelowWriteBufferLowWatermark() override { + // No data will be written to the connection while the wrapper is associated with it, + // so the write buffer should never hit the high watermark. + NOT_REACHED_GCOVR_EXCL_LINE; + } + + ClientConnection& connection() { return connection_; } + + private: + HappyEyeballsConnectionImpl& parent_; + ClientConnection& connection_; + }; + + // Creates a connection to the next address in address_list_ and applies + // any settings from per_connection_state_ to the newly created connection. + ClientConnectionPtr createNextConnection(); + + // Create a new connection, connects it and scheduled a timer to start another + // connection attempt if there are more addresses to connect to. + void tryAnotherConnection(); + + // Schedules another connection attempt if there are mode address to connect to. + void maybeScheduleNextAttempt(); + + // Called by the wrapper when the wrapped connection raises the specified event. 
+ void onEvent(ConnectionEvent event, ConnectionCallbacksWrapper* wrapper); + + // Called to bind the final connection. All other connections will be closed, and + // and deferred operations will be replayed. + void setUpFinalConnection(ConnectionEvent event, ConnectionCallbacksWrapper* wrapper); + + // Called by the write buffer containing pending writes if it goes below the + // low water mark. + void onWriteBufferLowWatermark(); + + // Called by the write buffer containing pending writes if it goes above the + // high water mark. + void onWriteBufferHighWatermark(); + + // Cleans up all state for the connection associated with wrapper. Called when the + // connection is no longer needed. + void cleanupWrapperAndConnection(ConnectionCallbacksWrapper* wrapper); + + // State which needs to be applied to every connection attempt. + struct PerConnectionState { + absl::optional detect_early_close_when_read_disabled_; + absl::optional no_delay_; + absl::optional enable_half_close_; + OptRef connection_stats_; + absl::optional buffer_limits_; + absl::optional start_secure_transport_; + absl::optional delayed_close_timeout_; + }; + + // State which needs to be saved and applied only to the final connection + // attempt. + struct PostConnectState { + std::vector connection_callbacks_; + std::vector bytes_sent_callbacks_; + std::vector read_filters_; + std::vector write_filters_; + std::vector filters_; + absl::optional write_buffer_; + absl::optional read_disable_count_; + absl::optional end_stream_; + absl::optional initialize_read_filters_; + }; + + // State which is needed to construct a new connection. + struct ConnectionConstructionState { + Address::InstanceConstSharedPtr source_address_; + TransportSocketFactory& socket_factory_; + TransportSocketOptionsConstSharedPtr transport_socket_options_; + const ConnectionSocket::OptionsSharedPtr options_; + }; + + // ID for this connection which is distinct from the ID of the underlying connections. 
+ const uint64_t id_; + + Event::Dispatcher& dispatcher_; + + // List of addresses to attempt to connect to. + const std::vector& address_list_; + // Index of the next address to use. + size_t next_address_ = 0; + + ConnectionConstructionState connection_construction_state_; + PerConnectionState per_connection_state_; + PostConnectState post_connect_state_; + + // Set of active connections. + std::vector connections_; + std::vector> callbacks_wrappers_; + + // True when connect() has finished, either success or failure. + bool connect_finished_ = false; + Event::TimerPtr next_attempt_timer_; +}; + +} // namespace Network +} // namespace Envoy diff --git a/source/common/network/io_socket_error_impl.h b/source/common/network/io_socket_error_impl.h index bd4d80ce68e12..d17ce2a2f31cf 100644 --- a/source/common/network/io_socket_error_impl.h +++ b/source/common/network/io_socket_error_impl.h @@ -18,6 +18,7 @@ class IoSocketError : public Api::IoError { Api::IoError::IoErrorCode getErrorCode() const override; std::string getErrorDetails() const override; + int getSystemErrorCode() const override { return errno_; } // IoErrorCode::Again is used frequently. Define it to be a singleton to avoid frequent memory // allocation of such instance. 
If this is used, IoHandleCallResult has to be instantiated with diff --git a/source/common/network/listen_socket_impl.cc b/source/common/network/listen_socket_impl.cc index 4c91190581e7c..79bd106162bad 100644 --- a/source/common/network/listen_socket_impl.cc +++ b/source/common/network/listen_socket_impl.cc @@ -19,13 +19,13 @@ namespace Envoy { namespace Network { Api::SysCallIntResult ListenSocketImpl::bind(Network::Address::InstanceConstSharedPtr address) { - address_provider_->setLocalAddress(address); + connection_info_provider_->setLocalAddress(address); - const Api::SysCallIntResult result = SocketImpl::bind(address_provider_->localAddress()); + const Api::SysCallIntResult result = SocketImpl::bind(connection_info_provider_->localAddress()); if (SOCKET_FAILURE(result.return_value_)) { close(); throw SocketBindException(fmt::format("cannot bind '{}': {}", - address_provider_->localAddress()->asString(), + connection_info_provider_->localAddress()->asString(), errorDetails(result.errno_)), result.errno_); } @@ -41,13 +41,13 @@ void ListenSocketImpl::setListenSocketOptions(const Network::Socket::OptionsShar void ListenSocketImpl::setupSocket(const Network::Socket::OptionsSharedPtr& options) { setListenSocketOptions(options); - bind(address_provider_->localAddress()); + bind(connection_info_provider_->localAddress()); } UdsListenSocket::UdsListenSocket(const Address::InstanceConstSharedPtr& address) : ListenSocketImpl(ioHandleForAddr(Socket::Type::Stream, address), address) { RELEASE_ASSERT(io_handle_->isOpen(), ""); - bind(address_provider_->localAddress()); + bind(connection_info_provider_->localAddress()); } UdsListenSocket::UdsListenSocket(IoHandlePtr&& io_handle, diff --git a/source/common/network/listen_socket_impl.h b/source/common/network/listen_socket_impl.h index 08eb48d0f2fbc..4d4c84ac24a2a 100644 --- a/source/common/network/listen_socket_impl.h +++ b/source/common/network/listen_socket_impl.h @@ -27,7 +27,7 @@ class ListenSocketImpl : public 
SocketImpl { // Using `new` to access a non-public constructor. return absl::WrapUnique( new ListenSocketImpl(io_handle_ == nullptr ? nullptr : io_handle_->duplicate(), - address_provider_->localAddress())); + connection_info_provider_->localAddress())); } void setupSocket(const Network::Socket::OptionsSharedPtr& options); @@ -143,7 +143,7 @@ class ConnectionSocketImpl : public SocketImpl, public ConnectionSocket { ConnectionSocketImpl(Socket::Type type, const Address::InstanceConstSharedPtr& local_address, const Address::InstanceConstSharedPtr& remote_address) : SocketImpl(type, local_address, remote_address) { - address_provider_->setLocalAddress(local_address); + connection_info_provider_->setLocalAddress(local_address); } // Network::Socket @@ -167,10 +167,10 @@ class ConnectionSocketImpl : public SocketImpl, public ConnectionSocket { void setRequestedServerName(absl::string_view server_name) override { // Always keep the server_name_ as lower case. - addressProvider().setRequestedServerName(absl::AsciiStrToLower(server_name)); + connectionInfoProvider().setRequestedServerName(absl::AsciiStrToLower(server_name)); } absl::string_view requestedServerName() const override { - return addressProvider().requestedServerName(); + return connectionInfoProvider().requestedServerName(); } absl::optional lastRoundTripTime() override { @@ -180,7 +180,7 @@ class ConnectionSocketImpl : public SocketImpl, public ConnectionSocket { void dumpState(std::ostream& os, int indent_level) const override { const char* spaces = spacesForLevel(indent_level); os << spaces << "ListenSocketImpl " << this << DUMP_MEMBER(transport_protocol_) << "\n"; - DUMP_DETAILS(address_provider_); + DUMP_DETAILS(connection_info_provider_); } protected: diff --git a/source/common/network/socket_impl.cc b/source/common/network/socket_impl.cc index 76d1bd47c37cf..c19ac2542f247 100644 --- a/source/common/network/socket_impl.cc +++ b/source/common/network/socket_impl.cc @@ -13,17 +13,19 @@ 
SocketImpl::SocketImpl(Socket::Type sock_type, const Address::InstanceConstSharedPtr& address_for_io_handle, const Address::InstanceConstSharedPtr& remote_address) : io_handle_(ioHandleForAddr(sock_type, address_for_io_handle)), - address_provider_(std::make_shared(nullptr, remote_address)), + connection_info_provider_( + std::make_shared(nullptr, remote_address)), sock_type_(sock_type), addr_type_(address_for_io_handle->type()) {} SocketImpl::SocketImpl(IoHandlePtr&& io_handle, const Address::InstanceConstSharedPtr& local_address, const Address::InstanceConstSharedPtr& remote_address) : io_handle_(std::move(io_handle)), - address_provider_(std::make_shared(local_address, remote_address)) { + connection_info_provider_( + std::make_shared(local_address, remote_address)) { - if (address_provider_->localAddress() != nullptr) { - addr_type_ = address_provider_->localAddress()->type(); + if (connection_info_provider_->localAddress() != nullptr) { + addr_type_ = connection_info_provider_->localAddress()->type(); return; } @@ -69,7 +71,7 @@ Api::SysCallIntResult SocketImpl::bind(Network::Address::InstanceConstSharedPtr bind_result = io_handle_->bind(address); if (bind_result.return_value_ == 0 && address->ip()->port() == 0) { - address_provider_->setLocalAddress(io_handle_->localAddress()); + connection_info_provider_->setLocalAddress(io_handle_->localAddress()); } return bind_result; } @@ -79,7 +81,7 @@ Api::SysCallIntResult SocketImpl::listen(int backlog) { return io_handle_->liste Api::SysCallIntResult SocketImpl::connect(const Network::Address::InstanceConstSharedPtr address) { auto result = io_handle_->connect(address); if (address->type() == Address::Type::Ip) { - address_provider_->setLocalAddress(io_handle_->localAddress()); + connection_info_provider_->setLocalAddress(io_handle_->localAddress()); } return result; } @@ -109,8 +111,8 @@ Api::SysCallIntResult SocketImpl::setBlockingForTest(bool blocking) { absl::optional SocketImpl::ipVersion() const { if 
(addr_type_ == Address::Type::Ip) { // Always hit after socket is initialized, i.e., accepted or connected - if (address_provider_->localAddress() != nullptr) { - return address_provider_->localAddress()->ip()->version(); + if (connection_info_provider_->localAddress() != nullptr) { + return connection_info_provider_->localAddress()->ip()->version(); } else { auto domain = io_handle_->domain(); if (!domain.has_value()) { diff --git a/source/common/network/socket_impl.h b/source/common/network/socket_impl.h index e04fae266b411..8c33d96bce229 100644 --- a/source/common/network/socket_impl.h +++ b/source/common/network/socket_impl.h @@ -8,10 +8,10 @@ namespace Envoy { namespace Network { -class SocketAddressSetterImpl : public SocketAddressSetter { +class ConnectionInfoSetterImpl : public ConnectionInfoSetter { public: - SocketAddressSetterImpl(const Address::InstanceConstSharedPtr& local_address, - const Address::InstanceConstSharedPtr& remote_address) + ConnectionInfoSetterImpl(const Address::InstanceConstSharedPtr& local_address, + const Address::InstanceConstSharedPtr& remote_address) : local_address_(local_address), remote_address_(remote_address), direct_remote_address_(remote_address) {} @@ -21,14 +21,14 @@ class SocketAddressSetterImpl : public SocketAddressSetter { void dumpState(std::ostream& os, int indent_level) const override { const char* spaces = spacesForLevel(indent_level); - os << spaces << "SocketAddressSetterImpl " << this + os << spaces << "ConnectionInfoSetterImpl " << this << DUMP_NULLABLE_MEMBER(remote_address_, remote_address_->asStringView()) << DUMP_NULLABLE_MEMBER(direct_remote_address_, direct_remote_address_->asStringView()) << DUMP_NULLABLE_MEMBER(local_address_, local_address_->asStringView()) << DUMP_MEMBER(server_name_) << "\n"; } - // SocketAddressSetter + // ConnectionInfoSetter const Address::InstanceConstSharedPtr& localAddress() const override { return local_address_; } void setLocalAddress(const Address::InstanceConstSharedPtr& 
local_address) override { local_address_ = local_address; @@ -51,6 +51,10 @@ class SocketAddressSetterImpl : public SocketAddressSetter { } absl::optional connectionID() const override { return connection_id_; } void setConnectionID(uint64_t id) override { connection_id_ = id; } + Ssl::ConnectionInfoConstSharedPtr sslConnection() const override { return ssl_info_; } + void setSslConnection(const Ssl::ConnectionInfoConstSharedPtr& ssl_connection_info) override { + ssl_info_ = ssl_connection_info; + } private: Address::InstanceConstSharedPtr local_address_; @@ -59,6 +63,7 @@ class SocketAddressSetterImpl : public SocketAddressSetter { Address::InstanceConstSharedPtr direct_remote_address_; std::string server_name_; absl::optional connection_id_; + Ssl::ConnectionInfoConstSharedPtr ssl_info_; }; class SocketImpl : public virtual Socket { @@ -67,10 +72,12 @@ class SocketImpl : public virtual Socket { const Address::InstanceConstSharedPtr& remote_address); // Network::Socket - SocketAddressSetter& addressProvider() override { return *address_provider_; } - const SocketAddressProvider& addressProvider() const override { return *address_provider_; } - SocketAddressProviderSharedPtr addressProviderSharedPtr() const override { - return address_provider_; + ConnectionInfoSetter& connectionInfoProvider() override { return *connection_info_provider_; } + const ConnectionInfoProvider& connectionInfoProvider() const override { + return *connection_info_provider_; + } + ConnectionInfoProviderSharedPtr connectionInfoProviderSharedPtr() const override { + return connection_info_provider_; } SocketPtr duplicate() override { // Implementing the functionality here for all sockets is tricky because it leads @@ -123,7 +130,7 @@ class SocketImpl : public virtual Socket { const Address::InstanceConstSharedPtr& remote_address); const IoHandlePtr io_handle_; - const std::shared_ptr address_provider_; + const std::shared_ptr connection_info_provider_; OptionsSharedPtr options_; Socket::Type 
sock_type_; Address::Type addr_type_; diff --git a/source/common/network/udp_listener_impl.cc b/source/common/network/udp_listener_impl.cc index f6dcb614d2788..764da61e94025 100644 --- a/source/common/network/udp_listener_impl.cc +++ b/source/common/network/udp_listener_impl.cc @@ -66,7 +66,7 @@ void UdpListenerImpl::handleReadCallback() { ENVOY_UDP_LOG(trace, "handleReadCallback"); cb_.onReadReady(); const Api::IoErrorPtr result = Utility::readPacketsFromSocket( - socket_->ioHandle(), *socket_->addressProvider().localAddress(), *this, time_source_, + socket_->ioHandle(), *socket_->connectionInfoProvider().localAddress(), *this, time_source_, config_.prefer_gro_, packets_dropped_); if (result == nullptr) { // No error. The number of reads was limited by read rate. There are more packets to read. @@ -102,7 +102,7 @@ void UdpListenerImpl::handleWriteCallback() { Event::Dispatcher& UdpListenerImpl::dispatcher() { return dispatcher_; } const Address::InstanceConstSharedPtr& UdpListenerImpl::localAddress() const { - return socket_->addressProvider().localAddress(); + return socket_->connectionInfoProvider().localAddress(); } Api::IoCallUint64Result UdpListenerImpl::send(const UdpSendData& send_data) { diff --git a/source/common/network/utility.cc b/source/common/network/utility.cc index 4145f3f4d5d68..7cd7b19c00be4 100644 --- a/source/common/network/utility.cc +++ b/source/common/network/utility.cc @@ -285,11 +285,11 @@ bool Utility::isSameIpOrLoopback(const ConnectionSocket& socket) { // - Pipes // - Sockets to a loopback address // - Sockets where the local and remote address (ignoring port) are the same - const auto& remote_address = socket.addressProvider().remoteAddress(); + const auto& remote_address = socket.connectionInfoProvider().remoteAddress(); if (remote_address->type() == Address::Type::Pipe || isLoopbackAddress(*remote_address)) { return true; } - const auto local_ip = socket.addressProvider().localAddress()->ip(); + const auto local_ip = 
socket.connectionInfoProvider().localAddress()->ip(); const auto remote_ip = remote_address->ip(); if (remote_ip != nullptr && local_ip != nullptr && remote_ip->addressAsString() == local_ip->addressAsString()) { diff --git a/source/common/protobuf/BUILD b/source/common/protobuf/BUILD index 64bfefa6c0031..d3235a5757e97 100644 --- a/source/common/protobuf/BUILD +++ b/source/common/protobuf/BUILD @@ -61,7 +61,6 @@ envoy_cc_library( deps = [ ":message_validator_lib", ":protobuf", - ":well_known_lib", "//envoy/api:api_interface", "//envoy/protobuf:message_validator_interface", "//envoy/runtime:runtime_interface", @@ -70,8 +69,6 @@ envoy_cc_library( "//source/common/common:hash_lib", "//source/common/common:stl_helpers", "//source/common/common:utility_lib", - "//source/common/config:api_type_oracle_lib", - "//source/common/config:version_converter_lib", "//source/common/protobuf:visitor_lib", "//source/common/runtime:runtime_features_lib", "@com_github_cncf_udpa//udpa/annotations:pkg_cc_proto", @@ -86,8 +83,3 @@ envoy_cc_library( hdrs = ["visitor.h"], deps = [":protobuf"], ) - -envoy_cc_library( - name = "well_known_lib", - hdrs = ["well_known.h"], -) diff --git a/source/common/protobuf/utility.cc b/source/common/protobuf/utility.cc index 04f5907db2cb1..c81d1b9245015 100644 --- a/source/common/protobuf/utility.cc +++ b/source/common/protobuf/utility.cc @@ -10,12 +10,9 @@ #include "source/common/common/assert.h" #include "source/common/common/documentation_url.h" #include "source/common/common/fmt.h" -#include "source/common/config/api_type_oracle.h" -#include "source/common/config/version_converter.h" #include "source/common/protobuf/message_validator_impl.h" #include "source/common/protobuf/protobuf.h" #include "source/common/protobuf/visitor.h" -#include "source/common/protobuf/well_known.h" #include "source/common/runtime/runtime_features.h" #include "absl/strings/match.h" @@ -27,19 +24,6 @@ using namespace std::chrono_literals; namespace Envoy { namespace { -// For 
historical reasons, these v2 protos are allowed in v3 and will not be removed during the v2 -// turn down. -static const absl::flat_hash_set& v2ProtosAllowedInV3() { - CONSTRUCT_ON_FIRST_USE( - absl::flat_hash_set, - {"envoy.config.health_checker.redis.v2.Redis", - "envoy.config.filter.thrift.router.v2alpha1.Router", - "envoy.config.resource_monitor.fixed_heap.v2alpha.FixedHeapConfig", - "envoy.config.resource_monitor.injected_resource.v2alpha.InjectedResourceConfig", - "envoy.config.retry.omit_canary_hosts.v2.OmitCanaryHostsPredicate", - "envoy.config.retry.previous_hosts.v2.PreviousHostsPredicate"}); -} - absl::string_view filenameFromPath(absl::string_view full_path) { size_t index = full_path.rfind("/"); if (index == std::string::npos || index == full_path.size()) { @@ -122,7 +106,7 @@ ProtobufWkt::Value parseYamlNode(const YAML::Node& node) { void jsonConvertInternal(const Protobuf::Message& source, ProtobufMessage::ValidationVisitor& validation_visitor, - Protobuf::Message& dest, bool do_boosting = true) { + Protobuf::Message& dest) { Protobuf::util::JsonPrintOptions json_options; json_options.preserve_proto_field_names = true; std::string json; @@ -131,66 +115,7 @@ void jsonConvertInternal(const Protobuf::Message& source, throw EnvoyException(fmt::format("Unable to convert protobuf message to JSON string: {} {}", status.ToString(), source.DebugString())); } - MessageUtil::loadFromJson(json, dest, validation_visitor, do_boosting); -} - -enum class MessageVersion { - // This is an earlier version of a message, a later one exists. - EarlierVersion, - // This is the latest version of a message. - LatestVersion, - // Validating to see if the latest version will also be accepted; only apply message validators - // without side effects, validations should be strict. 
- LatestVersionValidate, -}; - -using MessageXformFn = std::function; - -class ApiBoostRetryException : public EnvoyException { -public: - ApiBoostRetryException(const std::string& message) : EnvoyException(message) {} -}; - -// Apply a function transforming a message (e.g. loading JSON into the message). -// First we try with the message's earlier type, and if unsuccessful (or no -// earlier) type, then the current type. This allows us to take a v3 Envoy -// internal proto and ingest both v2 and v3 in methods such as loadFromJson. -// This relies on the property that any v3 configuration that is readable as -// v2 has the same semantics in v2/v3, which holds due to the highly structured -// vN/v(N+1) mechanical transforms. -void tryWithApiBoosting(MessageXformFn f, Protobuf::Message& message) { - const Protobuf::Descriptor* earlier_version_desc = - Config::ApiTypeOracle::getEarlierVersionDescriptor(message.GetDescriptor()->full_name()); - // If there is no earlier version of a message, just apply f directly. - if (earlier_version_desc == nullptr) { - f(message, MessageVersion::LatestVersion); - return; - } - - Protobuf::DynamicMessageFactory dmf; - auto earlier_message = ProtobufTypes::MessagePtr(dmf.GetPrototype(earlier_version_desc)->New()); - ASSERT(earlier_message != nullptr); - TRY_ASSERT_MAIN_THREAD { - // Try apply f with an earlier version of the message, then upgrade the - // result. - f(*earlier_message, MessageVersion::EarlierVersion); - // If we succeed at the earlier version, we ask the counterfactual, would this have worked at a - // later version? If not, this is v2 only and we need to warn. This is a waste of CPU cycles but - // we expect that JSON/YAML fragments will not be in use by any CPU limited use cases. - TRY_ASSERT_MAIN_THREAD { f(message, MessageVersion::LatestVersionValidate); } - END_TRY - catch (EnvoyException& e) { - MessageUtil::onVersionUpgradeDeprecation(e.what()); - } - // Now we do the real work of upgrading. 
- Config::VersionConverter::upgrade(*earlier_message, message); - } - END_TRY - catch (ApiBoostRetryException&) { - // If we fail at the earlier version, try f at the current version of the - // message. - f(message, MessageVersion::LatestVersion); - } + MessageUtil::loadFromJson(json, dest, validation_visitor); } // Logs a warning for use of a deprecated field or runtime-overridden use of an @@ -296,41 +221,6 @@ void ProtoExceptionUtil::throwProtoValidationException(const std::string& valida throw ProtoValidationException(validation_error, message); } -void MessageUtil::onVersionUpgradeDeprecation(absl::string_view desc, bool /*reject*/) { - const std::string& warning_str = - fmt::format("Configuration does not parse cleanly as v3. v2 configuration is " - "deprecated and will be removed from Envoy at the start of Q1 2021: {}", - desc); - // Always log at trace level. This is useful for tests that don't want to rely on possible - // elision. - ENVOY_LOG_MISC(trace, warning_str); - // Log each distinct message at warn level once every 5s. We use a static map here, which is fine - // as we are always on the main thread. - static auto* last_warned = new absl::flat_hash_map(); - const auto now = t_logclock::now().time_since_epoch().count(); - const auto it = last_warned->find(warning_str); - if (it == last_warned->end() || - (now - it->second) > std::chrono::duration_cast(5s).count()) { - ENVOY_LOG_MISC(warn, warning_str); - (*last_warned)[warning_str] = now; - } - Runtime::Loader* loader = Runtime::LoaderSingleton::getExisting(); - // We only log, and don't bump stats, if we're sufficiently early in server initialization (i.e. - // bootstrap). - if (loader != nullptr) { - loader->countDeprecatedFeatureUse(); - } - if (!Runtime::runtimeFeatureEnabled( - "envoy.test_only.broken_in_production.enable_deprecated_v2_api")) { - throw DeprecatedMajorVersionException(fmt::format( - "The v2 xDS major version is deprecated and disabled by default. 
Support for v2 will be " - "removed from Envoy at the start of Q1 2021. You may make use of v2 in Q4 2020 by " - "following " - "the advice in https://www.envoyproxy.io/docs/envoy/latest/faq/api/transition. ({})", - desc)); - } -} - size_t MessageUtil::hash(const Protobuf::Message& message) { std::string text_format; @@ -347,51 +237,35 @@ size_t MessageUtil::hash(const Protobuf::Message& message) { } void MessageUtil::loadFromJson(const std::string& json, Protobuf::Message& message, - ProtobufMessage::ValidationVisitor& validation_visitor, - bool do_boosting) { - auto load_json = [&json, &validation_visitor](Protobuf::Message& message, - MessageVersion message_version) { - Protobuf::util::JsonParseOptions options; - options.case_insensitive_enum_parsing = true; - // Let's first try and get a clean parse when checking for unknown fields; - // this should be the common case. - options.ignore_unknown_fields = false; - const auto strict_status = Protobuf::util::JsonStringToMessage(json, &message, options); - if (strict_status.ok()) { - // Success, no need to do any extra work. - return; - } - // If we fail, we see if we get a clean parse when allowing unknown fields. - // This is essentially a workaround - // for https://github.com/protocolbuffers/protobuf/issues/5967. - // TODO(htuch): clean this up when protobuf supports JSON/YAML unknown field - // detection directly. - options.ignore_unknown_fields = true; - const auto relaxed_status = Protobuf::util::JsonStringToMessage(json, &message, options); - // If we still fail with relaxed unknown field checking, the error has nothing - // to do with unknown fields. - if (!relaxed_status.ok()) { - throw EnvoyException("Unable to parse JSON as proto (" + relaxed_status.ToString() + - "): " + json); - } - // We know it's an unknown field at this point. If we're at the latest - // version, then it's definitely an unknown field, otherwise we try to - // load again at a later version. 
- if (message_version == MessageVersion::LatestVersion) { - validation_visitor.onUnknownField("type " + message.GetTypeName() + " reason " + - strict_status.ToString()); - } else if (message_version == MessageVersion::LatestVersionValidate) { - throw ProtobufMessage::UnknownProtoFieldException(absl::StrCat("Unknown field in: ", json)); - } else { - throw ApiBoostRetryException("Unknown field, possibly a rename, try again."); - } - }; - - if (do_boosting) { - tryWithApiBoosting(load_json, message); - } else { - load_json(message, MessageVersion::LatestVersion); + ProtobufMessage::ValidationVisitor& validation_visitor) { + Protobuf::util::JsonParseOptions options; + options.case_insensitive_enum_parsing = true; + // Let's first try and get a clean parse when checking for unknown fields; + // this should be the common case. + options.ignore_unknown_fields = false; + const auto strict_status = Protobuf::util::JsonStringToMessage(json, &message, options); + if (strict_status.ok()) { + // Success, no need to do any extra work. + return; } + // If we fail, we see if we get a clean parse when allowing unknown fields. + // This is essentially a workaround + // for https://github.com/protocolbuffers/protobuf/issues/5967. + // TODO(htuch): clean this up when protobuf supports JSON/YAML unknown field + // detection directly. + options.ignore_unknown_fields = true; + const auto relaxed_status = Protobuf::util::JsonStringToMessage(json, &message, options); + // If we still fail with relaxed unknown field checking, the error has nothing + // to do with unknown fields. + if (!relaxed_status.ok()) { + throw EnvoyException("Unable to parse JSON as proto (" + relaxed_status.ToString() + + "): " + json); + } + // We know it's an unknown field at this point. If we're at the latest + // version, then it's definitely an unknown field, otherwise we try to + // load again at a later version. 
+ validation_visitor.onUnknownField("type " + message.GetTypeName() + " reason " + + strict_status.ToString()); } void MessageUtil::loadFromJson(const std::string& json, ProtobufWkt::Struct& message) { @@ -401,92 +275,42 @@ void MessageUtil::loadFromJson(const std::string& json, ProtobufWkt::Struct& mes } void MessageUtil::loadFromYaml(const std::string& yaml, Protobuf::Message& message, - ProtobufMessage::ValidationVisitor& validation_visitor, - bool do_boosting) { + ProtobufMessage::ValidationVisitor& validation_visitor) { ProtobufWkt::Value value = ValueUtil::loadFromYaml(yaml); if (value.kind_case() == ProtobufWkt::Value::kStructValue || value.kind_case() == ProtobufWkt::Value::kListValue) { - jsonConvertInternal(value, validation_visitor, message, do_boosting); + jsonConvertInternal(value, validation_visitor, message); return; } throw EnvoyException("Unable to convert YAML as JSON: " + yaml); } -void MessageUtil::loadFromYaml(const std::string& yaml, ProtobufWkt::Struct& message) { - // No need to validate if converting to a Struct, since there are no unknown - // fields possible. - return loadFromYaml(yaml, message, ProtobufMessage::getNullValidationVisitor()); -} - void MessageUtil::loadFromFile(const std::string& path, Protobuf::Message& message, ProtobufMessage::ValidationVisitor& validation_visitor, - Api::Api& api, bool do_boosting) { + Api::Api& api) { const std::string contents = api.fileSystem().fileReadToEnd(path); // If the filename ends with .pb, attempt to parse it as a binary proto. if (absl::EndsWithIgnoreCase(path, FileExtensions::get().ProtoBinary)) { // Attempt to parse the binary format. - auto read_proto_binary = [&contents, &validation_visitor](Protobuf::Message& message, - MessageVersion message_version) { - TRY_ASSERT_MAIN_THREAD { - if (message.ParseFromString(contents)) { - MessageUtil::checkForUnexpectedFields( - message, message_version == MessageVersion::LatestVersionValidate - ? 
ProtobufMessage::getStrictValidationVisitor() - : validation_visitor); - } - return; - } - END_TRY - catch (EnvoyException& ex) { - if (message_version == MessageVersion::LatestVersion || - message_version == MessageVersion::LatestVersionValidate) { - // Failed reading the latest version - pass the same error upwards - throw ex; - } - } - throw ApiBoostRetryException( - "Failed to parse at earlier version, trying again at later version."); - }; - - if (do_boosting) { - // Attempts to read as the previous version and upgrade, and if it fails - // attempts to read as latest version. - tryWithApiBoosting(read_proto_binary, message); - } else { - read_proto_binary(message, MessageVersion::LatestVersion); + if (message.ParseFromString(contents)) { + MessageUtil::checkForUnexpectedFields(message, validation_visitor); } return; } // If the filename ends with .pb_text, attempt to parse it as a text proto. if (absl::EndsWithIgnoreCase(path, FileExtensions::get().ProtoText)) { - auto read_proto_text = [&contents, &path](Protobuf::Message& message, - MessageVersion message_version) { - if (Protobuf::TextFormat::ParseFromString(contents, &message)) { - return; - } - if (message_version == MessageVersion::LatestVersion || - message_version == MessageVersion::LatestVersionValidate) { - throw EnvoyException("Unable to parse file \"" + path + "\" as a text protobuf (type " + - message.GetTypeName() + ")"); - } else { - throw ApiBoostRetryException( - "Failed to parse at earlier version, trying again at later version."); - } - }; - - if (do_boosting) { - tryWithApiBoosting(read_proto_text, message); - } else { - read_proto_text(message, MessageVersion::LatestVersion); + if (Protobuf::TextFormat::ParseFromString(contents, &message)) { + return; } - return; + throw EnvoyException("Unable to parse file \"" + path + "\" as a text protobuf (type " + + message.GetTypeName() + ")"); } if (absl::EndsWithIgnoreCase(path, FileExtensions::get().Yaml) || absl::EndsWithIgnoreCase(path, 
FileExtensions::get().Yml)) { - loadFromYaml(contents, message, validation_visitor, do_boosting); + loadFromYaml(contents, message, validation_visitor); } else { - loadFromJson(contents, message, validation_visitor, do_boosting); + loadFromJson(contents, message, validation_visitor); } } @@ -545,7 +369,7 @@ class UnexpectedFieldProtoVisitor : public ProtobufMessage::ConstProtoVisitor { // If this field is deprecated, warn or throw an error. if (field.options().deprecated()) { - if (absl::StartsWith(field.name(), Config::VersionUtil::DeprecatedFieldShadowPrefix)) { + if (absl::StartsWith(field.name(), "hidden_envoy_deprecated_")) { // The field was marked as hidden_envoy_deprecated and an error must be thrown, // unless it is part of an explicit test that needs access to the deprecated field // when we enable runtime deprecation override to allow point field overrides for tests. @@ -579,9 +403,6 @@ class UnexpectedFieldProtoVisitor : public ProtobufMessage::ConstProtoVisitor { if (!unknown_fields.empty()) { std::string error_msg; for (int n = 0; n < unknown_fields.field_count(); ++n) { - if (unknown_fields.field(n).number() == ProtobufWellKnown::OriginalTypeFieldNumber) { - continue; - } error_msg += absl::StrCat(n > 0 ? ", " : "", unknown_fields.field(n).number()); } // We use the validation visitor but have hard coded behavior below for deprecated fields. 
@@ -605,8 +426,7 @@ void MessageUtil::checkForUnexpectedFields(const Protobuf::Message& message, ProtobufMessage::ValidationVisitor& validation_visitor, Runtime::Loader* runtime) { UnexpectedFieldProtoVisitor unexpected_field_visitor(validation_visitor, runtime); - ProtobufMessage::traverseMessage(unexpected_field_visitor, API_RECOVER_ORIGINAL(message), - nullptr); + ProtobufMessage::traverseMessage(unexpected_field_visitor, message, nullptr); } std::string MessageUtil::getYamlStringFromMessage(const Protobuf::Message& message, @@ -673,35 +493,6 @@ std::string MessageUtil::getJsonStringFromMessageOrError(const Protobuf::Message } void MessageUtil::unpackTo(const ProtobufWkt::Any& any_message, Protobuf::Message& message) { - // If we don't have a type URL match, try an earlier version. - const absl::string_view any_full_name = - TypeUtil::typeUrlToDescriptorFullName(any_message.type_url()); - if (any_full_name != message.GetDescriptor()->full_name()) { - const Protobuf::Descriptor* earlier_version_desc = - Config::ApiTypeOracle::getEarlierVersionDescriptor(message.GetDescriptor()->full_name()); - // If the earlier version matches, unpack and upgrade. - if (earlier_version_desc != nullptr && any_full_name == earlier_version_desc->full_name()) { - // Take the Any message but adjust its type URL, since earlier/later versions are wire - // compatible. - ProtobufWkt::Any any_message_with_fixup; - any_message_with_fixup.MergeFrom(any_message); - any_message_with_fixup.set_type_url("type.googleapis.com/" + - message.GetDescriptor()->full_name()); - if (!any_message_with_fixup.UnpackTo(&message)) { - throw EnvoyException(fmt::format("Unable to unpack as {}: {}", - earlier_version_desc->full_name(), - any_message_with_fixup.DebugString())); - } - Config::VersionConverter::annotateWithOriginalType(*earlier_version_desc, message); - // We allow some v2 protos in v3 for historical reasons. 
- if (v2ProtosAllowedInV3().count(any_full_name) == 0) { - MessageUtil::onVersionUpgradeDeprecation(any_full_name); - } - return; - } - } - // Otherwise, just unpack to the message. Type URL mismatches will be signaled - // by UnpackTo failure. if (!any_message.UnpackTo(&message)) { throw EnvoyException(fmt::format("Unable to unpack as {}: {}", message.GetDescriptor()->full_name(), @@ -893,6 +684,14 @@ void MessageUtil::redact(Protobuf::Message& message) { ::Envoy::redact(&message, /* ancestor_is_sensitive = */ false); } +void MessageUtil::wireCast(const Protobuf::Message& src, Protobuf::Message& dst) { + // This should should generally succeed, but if there are malformed UTF-8 strings in a message, + // this can fail. + if (!dst.ParseFromString(src.SerializeAsString())) { + throw EnvoyException("Unable to deserialize during wireCast()"); + } +} + ProtobufWkt::Value ValueUtil::loadFromYaml(const std::string& yaml) { TRY_ASSERT_MAIN_THREAD { return parseYamlNode(YAML::Load(yaml)); } END_TRY diff --git a/source/common/protobuf/utility.h b/source/common/protobuf/utility.h index 7ccc41394eafd..e8f4a61cd90b6 100644 --- a/source/common/protobuf/utility.h +++ b/source/common/protobuf/utility.h @@ -11,7 +11,6 @@ #include "source/common/common/hash.h" #include "source/common/common/stl_helpers.h" #include "source/common/common/utility.h" -#include "source/common/config/version_converter.h" #include "source/common/protobuf/protobuf.h" #include "source/common/singleton/const_singleton.h" @@ -252,16 +251,12 @@ class MessageUtil { static std::size_t hash(const Protobuf::Message& message); static void loadFromJson(const std::string& json, Protobuf::Message& message, - ProtobufMessage::ValidationVisitor& validation_visitor, - bool do_boosting = true); + ProtobufMessage::ValidationVisitor& validation_visitor); static void loadFromJson(const std::string& json, ProtobufWkt::Struct& message); static void loadFromYaml(const std::string& yaml, Protobuf::Message& message, - 
ProtobufMessage::ValidationVisitor& validation_visitor, - bool do_boosting = true); - static void loadFromYaml(const std::string& yaml, ProtobufWkt::Struct& message); + ProtobufMessage::ValidationVisitor& validation_visitor); static void loadFromFile(const std::string& path, Protobuf::Message& message, - ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api, - bool do_boosting = true); + ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api); /** * Checks for use of deprecated fields in message and all sub-messages. @@ -292,15 +287,14 @@ class MessageUtil { std::string err; if (!Validate(message, &err)) { - ProtoExceptionUtil::throwProtoValidationException(err, API_RECOVER_ORIGINAL(message)); + ProtoExceptionUtil::throwProtoValidationException(err, message); } } template static void loadFromYamlAndValidate(const std::string& yaml, MessageType& message, - ProtobufMessage::ValidationVisitor& validation_visitor, - bool avoid_boosting = false) { - loadFromYaml(yaml, message, validation_visitor, !avoid_boosting); + ProtobufMessage::ValidationVisitor& validation_visitor) { + loadFromYaml(yaml, message, validation_visitor); validate(message, validation_visitor); } @@ -394,14 +388,6 @@ class MessageUtil { return typed_message; }; - /** - * Invoke when a version upgrade (e.g. v2 -> v3) is detected. This may warn or throw - * depending on where we are in the major version deprecation cycle. - * @param desc description of upgrade to include in warning or exception. - * @param reject should a DeprecatedMajorVersionException be thrown on failure? - */ - static void onVersionUpgradeDeprecation(absl::string_view desc, bool reject = true); - /** * Obtain a string field from a protobuf message dynamically. * @@ -533,6 +519,17 @@ class MessageUtil { * @param message message to redact. */ static void redact(Protobuf::Message& message); + + /** + * Reinterpret a Protobuf message as another Protobuf message by converting to wire format and + * back. 
This only works for messages that can be effectively duck typed this way, e.g. with a + * subtype relationship modulo field name. + * + * @param src source message. + * @param dst destination message. + * @throw EnvoyException if a conversion error occurs. + */ + static void wireCast(const Protobuf::Message& src, Protobuf::Message& dst); }; class ValueUtil { diff --git a/source/common/protobuf/visitor.cc b/source/common/protobuf/visitor.cc index ed6d0b6857d9a..0e22cb2f076a8 100644 --- a/source/common/protobuf/visitor.cc +++ b/source/common/protobuf/visitor.cc @@ -3,27 +3,6 @@ namespace Envoy { namespace ProtobufMessage { -void traverseMutableMessage(ProtoVisitor& visitor, Protobuf::Message& message, const void* ctxt) { - visitor.onMessage(message, ctxt); - const Protobuf::Descriptor* descriptor = message.GetDescriptor(); - const Protobuf::Reflection* reflection = message.GetReflection(); - for (int i = 0; i < descriptor->field_count(); ++i) { - const Protobuf::FieldDescriptor* field = descriptor->field(i); - const void* field_ctxt = visitor.onField(message, *field, ctxt); - // If this is a message, recurse to visit fields in the sub-message. 
- if (field->cpp_type() == Protobuf::FieldDescriptor::CPPTYPE_MESSAGE) { - if (field->is_repeated()) { - const int size = reflection->FieldSize(message, field); - for (int j = 0; j < size; ++j) { - traverseMutableMessage(visitor, *reflection->MutableRepeatedMessage(&message, field, j), - field_ctxt); - } - } else if (reflection->HasField(message, field)) { - traverseMutableMessage(visitor, *reflection->MutableMessage(&message, field), field_ctxt); - } - } - } -} void traverseMessage(ConstProtoVisitor& visitor, const Protobuf::Message& message, const void* ctxt) { visitor.onMessage(message, ctxt); diff --git a/source/common/protobuf/visitor.h b/source/common/protobuf/visitor.h index 129f5e52d4743..3c2b782a84e91 100644 --- a/source/common/protobuf/visitor.h +++ b/source/common/protobuf/visitor.h @@ -5,21 +5,6 @@ namespace Envoy { namespace ProtobufMessage { -class ProtoVisitor { -public: - virtual ~ProtoVisitor() = default; - - // Invoked when a field is visited, with the message, field descriptor and context. Returns a new - // context for use when traversing the sub-message in a field. - virtual const void* onField(Protobuf::Message&, const Protobuf::FieldDescriptor&, - const void* ctxt) { - return ctxt; - } - - // Invoked when a message is visited, with the message and a context. 
- virtual void onMessage(Protobuf::Message&, const void*){}; -}; - class ConstProtoVisitor { public: virtual ~ConstProtoVisitor() = default; @@ -35,7 +20,6 @@ class ConstProtoVisitor { virtual void onMessage(const Protobuf::Message&, const void*){}; }; -void traverseMutableMessage(ProtoVisitor& visitor, Protobuf::Message& message, const void* ctxt); void traverseMessage(ConstProtoVisitor& visitor, const Protobuf::Message& message, const void* ctxt); diff --git a/source/common/protobuf/well_known.h b/source/common/protobuf/well_known.h deleted file mode 100644 index dcd2a9a82b4be..0000000000000 --- a/source/common/protobuf/well_known.h +++ /dev/null @@ -1,14 +0,0 @@ -#pragma once - -#include - -namespace Envoy { -namespace ProtobufWellKnown { - -// Used by VersionConverter to track the original type of an upgraded message. -// Magic number in this file derived from top 28bit of SHA256 digest of -// "original type". -constexpr uint32_t OriginalTypeFieldNumber = 183412668; - -} // namespace ProtobufWellKnown -} // namespace Envoy diff --git a/source/common/quic/BUILD b/source/common/quic/BUILD index 24ad4b8453ea9..fed592a8ddb11 100644 --- a/source/common/quic/BUILD +++ b/source/common/quic/BUILD @@ -20,8 +20,8 @@ envoy_cc_library( deps = [ "//envoy/event:dispatcher_interface", "//envoy/event:timer_interface", - "@com_googlesource_quiche//:quic_core_alarm_lib", - "@com_googlesource_quiche//:quic_core_clock_lib", + "@com_github_google_quiche//:quic_core_alarm_lib", + "@com_github_google_quiche//:quic_core_clock_lib", ], ) @@ -33,9 +33,9 @@ envoy_cc_library( tags = ["nofips"], deps = [ ":envoy_quic_alarm_lib", - "@com_googlesource_quiche//:quic_core_alarm_factory_lib", - "@com_googlesource_quiche//:quic_core_arena_scoped_ptr_lib", - "@com_googlesource_quiche//:quic_core_one_block_arena_lib", + "@com_github_google_quiche//:quic_core_alarm_factory_lib", + "@com_github_google_quiche//:quic_core_arena_scoped_ptr_lib", + 
"@com_github_google_quiche//:quic_core_one_block_arena_lib", ], ) @@ -45,9 +45,9 @@ envoy_cc_library( tags = ["nofips"], deps = [ "//source/common/quic/platform:envoy_quic_clock_lib", - "@com_googlesource_quiche//:quic_core_buffer_allocator_lib", - "@com_googlesource_quiche//:quic_core_connection_lib", - "@com_googlesource_quiche//:quic_core_crypto_random_lib", + "@com_github_google_quiche//:quic_core_buffer_allocator_lib", + "@com_github_google_quiche//:quic_core_connection_lib", + "@com_github_google_quiche//:quic_core_crypto_random_lib", ], ) @@ -59,8 +59,8 @@ envoy_cc_library( deps = [ "//envoy/stats:stats_interface", "//source/common/stats:symbol_table_lib", - "@com_googlesource_quiche//:quic_core_error_codes_lib", - "@com_googlesource_quiche//:quic_core_types_lib", + "@com_github_google_quiche//:quic_core_error_codes_lib", + "@com_github_google_quiche//:quic_core_types_lib", ], ) @@ -72,11 +72,11 @@ envoy_cc_library( tags = ["nofips"], deps = [ ":envoy_quic_utils_lib", - "@com_googlesource_quiche//:quic_core_crypto_certificate_view_lib", - "@com_googlesource_quiche//:quic_core_crypto_crypto_handshake_lib", - "@com_googlesource_quiche//:quic_core_crypto_proof_source_lib", - "@com_googlesource_quiche//:quic_core_data_lib", - "@com_googlesource_quiche//:quic_core_versions_lib", + "@com_github_google_quiche//:quic_core_crypto_certificate_view_lib", + "@com_github_google_quiche//:quic_core_crypto_crypto_handshake_lib", + "@com_github_google_quiche//:quic_core_crypto_proof_source_lib", + "@com_github_google_quiche//:quic_core_data_lib", + "@com_github_google_quiche//:quic_core_versions_lib", ], ) @@ -93,7 +93,7 @@ envoy_cc_library( ":quic_transport_socket_factory_lib", "//envoy/ssl:tls_certificate_config_interface", "//source/server:connection_handler_lib", - "@com_googlesource_quiche//:quic_core_crypto_certificate_view_lib", + "@com_github_google_quiche//:quic_core_crypto_certificate_view_lib", ], ) @@ -105,9 +105,9 @@ envoy_cc_library( tags = ["nofips"], deps = [ 
":envoy_quic_utils_lib", - "@com_googlesource_quiche//:quic_core_crypto_certificate_view_lib", - "@com_googlesource_quiche//:quic_core_crypto_crypto_handshake_lib", - "@com_googlesource_quiche//:quic_core_versions_lib", + "@com_github_google_quiche//:quic_core_crypto_certificate_view_lib", + "@com_github_google_quiche//:quic_core_crypto_crypto_handshake_lib", + "@com_github_google_quiche//:quic_core_versions_lib", ], ) @@ -131,7 +131,7 @@ envoy_cc_library( external_deps = ["quiche_quic_platform"], tags = ["nofips"], deps = [ - "@com_googlesource_quiche//:quic_core_crypto_crypto_handshake_lib", + "@com_github_google_quiche//:quic_core_crypto_crypto_handshake_lib", ], ) @@ -141,7 +141,7 @@ envoy_cc_library( tags = ["nofips"], deps = [ "//source/common/common:assert_lib", - "@com_googlesource_quiche//:quic_core_http_spdy_server_push_utils_header", + "@com_github_google_quiche//:quic_core_http_spdy_server_push_utils_header", ], ) @@ -180,7 +180,7 @@ envoy_cc_library( "//source/common/http/http3:quic_client_connection_factory_lib", "//source/extensions/quic/crypto_stream:envoy_quic_crypto_client_stream_lib", "//source/extensions/transport_sockets/tls:ssl_socket_lib", - "@com_googlesource_quiche//:quic_core_http_spdy_session_lib", + "@com_github_google_quiche//:quic_core_http_spdy_session_lib", ], ) @@ -195,7 +195,7 @@ envoy_cc_library( ":envoy_quic_utils_lib", "//envoy/http:codec_interface", "//envoy/registry", - "@com_googlesource_quiche//:quic_core_http_spdy_session_lib", + "@com_github_google_quiche//:quic_core_http_spdy_session_lib", ], ) @@ -217,7 +217,7 @@ envoy_cc_library( "//source/common/http/http3:codec_stats_lib", "//source/common/network:connection_base_lib", "//source/common/stream_info:stream_info_lib", - "@com_googlesource_quiche//:quic_core_connection_lib", + "@com_github_google_quiche//:quic_core_connection_lib", ], ) @@ -243,8 +243,7 @@ envoy_cc_library( "//source/common/buffer:buffer_lib", "//source/common/common:assert_lib", 
"//source/common/http:header_map_lib", - "//source/common/quic/platform:quic_platform_mem_slice_storage_impl_lib", - "@com_googlesource_quiche//:quic_core_http_spdy_session_lib", + "@com_github_google_quiche//:quic_core_http_spdy_session_lib", ], ) @@ -271,8 +270,7 @@ envoy_cc_library( "//source/common/http:codes_lib", "//source/common/http:header_map_lib", "//source/common/http:header_utility_lib", - "//source/common/quic/platform:quic_platform_mem_slice_storage_impl_lib", - "@com_googlesource_quiche//:quic_core_http_client_lib", + "@com_github_google_quiche//:quic_core_http_client_lib", ], ) @@ -306,7 +304,7 @@ envoy_cc_library( ":quic_network_connection_lib", "//source/common/quic:envoy_quic_utils_lib", "//source/server:connection_handler_lib", - "@com_googlesource_quiche//:quic_core_connection_lib", + "@com_github_google_quiche//:quic_core_connection_lib", ], ) @@ -321,7 +319,7 @@ envoy_cc_library( "//envoy/event:dispatcher_interface", "//source/common/network:socket_option_factory_lib", "//source/common/network:udp_packet_writer_handler_lib", - "@com_googlesource_quiche//:quic_core_connection_lib", + "@com_github_google_quiche//:quic_core_connection_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) @@ -339,8 +337,8 @@ envoy_cc_library( ":quic_stat_names_lib", "//envoy/network:listener_interface", "//source/server:connection_handler_lib", - "@com_googlesource_quiche//:quic_core_server_lib", - "@com_googlesource_quiche//:quic_core_utils_lib", + "@com_github_google_quiche//:quic_core_server_lib", + "@com_github_google_quiche//:quic_core_utils_lib", ], ) @@ -391,8 +389,8 @@ envoy_cc_library( "//source/common/network:listen_socket_lib", "//source/common/network:socket_option_factory_lib", "//source/common/quic:quic_io_handle_wrapper_lib", - "@com_googlesource_quiche//:quic_core_config_lib", - "@com_googlesource_quiche//:quic_core_http_header_list_lib", + "@com_github_google_quiche//:quic_core_config_lib", + 
"@com_github_google_quiche//:quic_core_http_header_list_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", ], @@ -442,7 +440,7 @@ envoy_cc_library( tags = ["nofips"], deps = [ ":envoy_quic_utils_lib", - "@com_googlesource_quiche//:quic_core_packet_writer_lib", + "@com_github_google_quiche//:quic_core_packet_writer_lib", ], ) @@ -461,7 +459,7 @@ envoy_cc_library( "//source/common/network:io_socket_error_lib", "//source/common/protobuf:utility_lib", "//source/common/runtime:runtime_lib", - "@com_googlesource_quiche//:quic_core_batch_writer_gso_batch_writer_lib", + "@com_github_google_quiche//:quic_core_batch_writer_gso_batch_writer_lib", ], ) @@ -471,7 +469,7 @@ envoy_cc_library( hdrs = ["send_buffer_monitor.h"], tags = ["nofips"], deps = [ - "@com_googlesource_quiche//:quic_core_session_lib", + "@com_github_google_quiche//:quic_core_session_lib", ], ) @@ -481,7 +479,7 @@ envoy_cc_library( tags = ["nofips"], deps = [ "//envoy/config:typed_config_interface", - "@com_googlesource_quiche//:quic_core_http_spdy_session_lib", + "@com_github_google_quiche//:quic_core_http_spdy_session_lib", ], ) @@ -492,6 +490,6 @@ envoy_cc_library( deps = [ "//envoy/config:typed_config_interface", "//source/server:connection_handler_lib", - "@com_googlesource_quiche//:quic_core_crypto_proof_source_lib", + "@com_github_google_quiche//:quic_core_crypto_proof_source_lib", ], ) diff --git a/source/common/quic/active_quic_listener.cc b/source/common/quic/active_quic_listener.cc index 840224c2a4829..5bd7651758ada 100644 --- a/source/common/quic/active_quic_listener.cc +++ b/source/common/quic/active_quic_listener.cc @@ -21,6 +21,8 @@ namespace Envoy { namespace Quic { +bool ActiveQuicListenerFactory::disable_kernel_bpf_packet_routing_for_test_ = false; + ActiveQuicListener::ActiveQuicListener( uint32_t worker_index, uint32_t concurrency, Event::Dispatcher& dispatcher, Network::UdpConnectionHandler& parent, Network::ListenerConfig& 
listener_config, @@ -49,7 +51,7 @@ ActiveQuicListener::ActiveQuicListener( listen_socket, *this, listener_config.udpListenerConfig()->config().downstream_socket_config()), &listener_config), - dispatcher_(dispatcher), version_manager_(quic::CurrentSupportedVersions()), + dispatcher_(dispatcher), version_manager_(quic::CurrentSupportedHttp3Versions()), kernel_worker_routing_(kernel_worker_routing), packets_to_read_to_connection_count_ratio_(packets_to_read_to_connection_count_ratio), crypto_server_stream_factory_(crypto_server_stream_factory) { @@ -300,8 +302,7 @@ ActiveQuicListenerFactory::ActiveQuicListenerFactory( {0x16, 0, 0, 0000000000}, // ret a }; // SPELLCHECKER(on) - if (Runtime::runtimeFeatureEnabled( - "envoy.reloadable_features.prefer_quic_kernel_bpf_packet_routing")) { + if (!disable_kernel_bpf_packet_routing_for_test_) { if (concurrency_ > 1) { // Note that this option refers to the BPF program data above, which must live until the // option is used. The program is kept as a member variable for this purpose. 
diff --git a/source/common/quic/active_quic_listener.h b/source/common/quic/active_quic_listener.h index d4c558a3728d0..be72d17334770 100644 --- a/source/common/quic/active_quic_listener.h +++ b/source/common/quic/active_quic_listener.h @@ -106,6 +106,10 @@ class ActiveQuicListenerFactory : public Network::ActiveUdpListenerFactory, bool isTransportConnectionless() const override { return false; } const Network::Socket::OptionsSharedPtr& socketOptions() const override { return options_; } + static void setDisableKernelBpfPacketRoutingForTest(bool val) { + disable_kernel_bpf_packet_routing_for_test_ = val; + } + private: friend class ActiveQuicListenerFactoryPeer; @@ -121,6 +125,8 @@ class ActiveQuicListenerFactory : public Network::ActiveUdpListenerFactory, const Network::Socket::OptionsSharedPtr options_{std::make_shared()}; bool kernel_worker_routing_{}; + static bool disable_kernel_bpf_packet_routing_for_test_; + #if defined(SO_ATTACH_REUSEPORT_CBPF) && defined(__linux__) sock_fprog prog_; std::vector filter_; diff --git a/source/common/quic/client_connection_factory_impl.cc b/source/common/quic/client_connection_factory_impl.cc index 2e796745a4ca4..00d86543436ff 100644 --- a/source/common/quic/client_connection_factory_impl.cc +++ b/source/common/quic/client_connection_factory_impl.cc @@ -64,18 +64,17 @@ createQuicNetworkConnection(Http::PersistentQuicInfo& info, Event::Dispatcher& d if (config == nullptr) { return nullptr; // no secrets available yet. 
} - + quic::ParsedQuicVersionVector quic_versions = quic::CurrentSupportedHttp3Versions(); + ASSERT(!quic_versions.empty()); auto connection = std::make_unique( quic::QuicUtils::CreateRandomConnectionId(), server_addr, info_impl->conn_helper_, - info_impl->alarm_factory_, quic::ParsedQuicVersionVector{info_impl->supported_versions_[0]}, - local_addr, dispatcher, nullptr); + info_impl->alarm_factory_, quic_versions, local_addr, dispatcher, nullptr); - ASSERT(!info_impl->supported_versions_.empty()); // QUICHE client session always use the 1st version to start handshake. auto ret = std::make_unique( - info_impl->quic_config_, info_impl->supported_versions_, std::move(connection), - info_impl->server_id_, std::move(config), &info_impl->push_promise_index_, dispatcher, - info_impl->buffer_limit_, info_impl->crypto_stream_factory_, quic_stat_names, scope); + info_impl->quic_config_, quic_versions, std::move(connection), info_impl->server_id_, + std::move(config), &info_impl->push_promise_index_, dispatcher, info_impl->buffer_limit_, + info_impl->crypto_stream_factory_, quic_stat_names, scope); return ret; } diff --git a/source/common/quic/client_connection_factory_impl.h b/source/common/quic/client_connection_factory_impl.h index 5267c67b8fd0f..acc1d0cb244c2 100644 --- a/source/common/quic/client_connection_factory_impl.h +++ b/source/common/quic/client_connection_factory_impl.h @@ -40,7 +40,6 @@ struct PersistentQuicInfoImpl : public Http::PersistentQuicInfo { Envoy::Ssl::ClientContextSharedPtr client_context_; // If client context changes, client config will be updated as well. std::shared_ptr client_config_; - const quic::ParsedQuicVersionVector supported_versions_{quic::CurrentSupportedVersions()}; quic::QuicConfig quic_config_; // The cluster buffer limits. 
const uint32_t buffer_limit_; diff --git a/source/common/quic/codec_impl.cc b/source/common/quic/codec_impl.cc index 546ea7ee0c193..0afcf65b99055 100644 --- a/source/common/quic/codec_impl.cc +++ b/source/common/quic/codec_impl.cc @@ -51,19 +51,11 @@ void QuicHttpServerConnectionImpl::onUnderlyingConnectionBelowWriteBufferLowWate } void QuicHttpServerConnectionImpl::shutdownNotice() { - if (quic::VersionUsesHttp3(quic_server_session_.transport_version())) { - quic_server_session_.SendHttp3GoAway(quic::QUIC_PEER_GOING_AWAY, "Server shutdown"); - } else { - ENVOY_CONN_LOG(debug, "Shutdown notice is not propagated to QUIC.", quic_server_session_); - } + quic_server_session_.SendHttp3GoAway(quic::QUIC_PEER_GOING_AWAY, "Server shutdown"); } void QuicHttpServerConnectionImpl::goAway() { - if (quic::VersionUsesHttp3(quic_server_session_.transport_version())) { - quic_server_session_.SendHttp3GoAway(quic::QUIC_PEER_GOING_AWAY, "server shutdown imminent"); - } else { - quic_server_session_.SendGoAway(quic::QUIC_PEER_GOING_AWAY, "server shutdown imminent"); - } + quic_server_session_.SendHttp3GoAway(quic::QUIC_PEER_GOING_AWAY, "server shutdown imminent"); } QuicHttpClientConnectionImpl::QuicHttpClientConnectionImpl( diff --git a/source/common/quic/envoy_quic_client_connection.cc b/source/common/quic/envoy_quic_client_connection.cc index 18221eb9bb355..003851a8bb8cd 100644 --- a/source/common/quic/envoy_quic_client_connection.cc +++ b/source/common/quic/envoy_quic_client_connection.cc @@ -42,7 +42,7 @@ EnvoyQuicClientConnection::EnvoyQuicClientConnection( Network::ConnectionSocketPtr&& connection_socket) : quic::QuicConnection(server_connection_id, quic::QuicSocketAddress(), envoyIpAddressToQuicSocketAddress( - connection_socket->addressProvider().remoteAddress()->ip()), + connection_socket->connectionInfoProvider().remoteAddress()->ip()), &helper, &alarm_factory, writer, owns_writer, quic::Perspective::IS_CLIENT, supported_versions), 
QuicNetworkConnection(std::move(connection_socket)), dispatcher_(dispatcher) {} @@ -51,9 +51,6 @@ void EnvoyQuicClientConnection::processPacket( Network::Address::InstanceConstSharedPtr local_address, Network::Address::InstanceConstSharedPtr peer_address, Buffer::InstancePtr buffer, MonotonicTime receive_time) { - if (!connected()) { - return; - } quic::QuicTime timestamp = quic::QuicTime::Zero() + quic::QuicTime::Delta::FromMicroseconds( @@ -98,10 +95,10 @@ void EnvoyQuicClientConnection::switchConnectionSocket( Network::ConnectionSocketPtr&& connection_socket) { auto writer = std::make_unique( std::make_unique(connection_socket->ioHandle())); - quic::QuicSocketAddress self_address = - envoyIpAddressToQuicSocketAddress(connection_socket->addressProvider().localAddress()->ip()); - quic::QuicSocketAddress peer_address = - envoyIpAddressToQuicSocketAddress(connection_socket->addressProvider().remoteAddress()->ip()); + quic::QuicSocketAddress self_address = envoyIpAddressToQuicSocketAddress( + connection_socket->connectionInfoProvider().localAddress()->ip()); + quic::QuicSocketAddress peer_address = envoyIpAddressToQuicSocketAddress( + connection_socket->connectionInfoProvider().remoteAddress()->ip()); // The old socket is closed in this call. setConnectionSocket(std::move(connection_socket)); @@ -127,8 +124,9 @@ void EnvoyQuicClientConnection::onFileEvent(uint32_t events) { // right default for QUIC. Determine whether this should be configurable or not. 
if (connected() && (events & Event::FileReadyType::Read)) { Api::IoErrorPtr err = Network::Utility::readPacketsFromSocket( - connectionSocket()->ioHandle(), *connectionSocket()->addressProvider().localAddress(), - *this, dispatcher_.timeSource(), true, packets_dropped_); + connectionSocket()->ioHandle(), + *connectionSocket()->connectionInfoProvider().localAddress(), *this, + dispatcher_.timeSource(), true, packets_dropped_); if (err == nullptr) { connectionSocket()->ioHandle().activateFileEvents(Event::FileReadyType::Read); return; diff --git a/source/common/quic/envoy_quic_client_session.cc b/source/common/quic/envoy_quic_client_session.cc index e300b820f873e..df9fd5f7a6755 100644 --- a/source/common/quic/envoy_quic_client_session.cc +++ b/source/common/quic/envoy_quic_client_session.cc @@ -48,26 +48,10 @@ void EnvoyQuicClientSession::Initialize() { } void EnvoyQuicClientSession::OnCanWrite() { - if (quic::VersionUsesHttp3(transport_version())) { - quic::QuicSpdyClientSession::OnCanWrite(); - } else { - // This will cause header stream flushing. It is the only place to discount bytes buffered in - // header stream from connection watermark buffer during writing. 
- SendBufferMonitor::ScopedWatermarkBufferUpdater updater(headers_stream(), this); - quic::QuicSpdyClientSession::OnCanWrite(); - } + quic::QuicSpdyClientSession::OnCanWrite(); maybeApplyDelayClosePolicy(); } -void EnvoyQuicClientSession::OnGoAway(const quic::QuicGoAwayFrame& frame) { - ENVOY_CONN_LOG(debug, "GOAWAY received with error {}: {}", *this, - quic::QuicErrorCodeToString(frame.error_code), frame.reason_phrase); - quic::QuicSpdyClientSession::OnGoAway(frame); - if (http_connection_callbacks_ != nullptr) { - http_connection_callbacks_->onGoAway(quicErrorCodeToEnvoyErrorCode(frame.error_code)); - } -} - void EnvoyQuicClientSession::OnHttp3GoAway(uint64_t stream_id) { ENVOY_CONN_LOG(debug, "HTTP/3 GOAWAY received", *this); quic::QuicSpdyClientSession::OnHttp3GoAway(stream_id); @@ -77,6 +61,20 @@ void EnvoyQuicClientSession::OnHttp3GoAway(uint64_t stream_id) { } } +void EnvoyQuicClientSession::MaybeSendRstStreamFrame(quic::QuicStreamId id, + quic::QuicRstStreamErrorCode error, + quic::QuicStreamOffset bytes_written) { + QuicSpdyClientSession::MaybeSendRstStreamFrame(id, error, bytes_written); + quic_stat_names_.chargeQuicResetStreamErrorStats(scope_, error, /*from_self*/ true, + /*is_upstream*/ true); +} + +void EnvoyQuicClientSession::OnRstStream(const quic::QuicRstStreamFrame& frame) { + QuicSpdyClientSession::OnRstStream(frame); + quic_stat_names_.chargeQuicResetStreamErrorStats(scope_, frame.error_code, + /*from_self*/ false, /*is_upstream*/ true); +} + void EnvoyQuicClientSession::SetDefaultEncryptionLevel(quic::EncryptionLevel level) { quic::QuicSpdyClientSession::SetDefaultEncryptionLevel(level); if (level == quic::ENCRYPTION_FORWARD_SECURE) { @@ -93,13 +91,13 @@ std::unique_ptr EnvoyQuicClientSession::CreateClient } quic::QuicSpdyStream* EnvoyQuicClientSession::CreateIncomingStream(quic::QuicStreamId /*id*/) { - // Disallow server initiated stream. - NOT_REACHED_GCOVR_EXCL_LINE; + // Envoy doesn't support server initiated stream. 
+ return nullptr; } quic::QuicSpdyStream* EnvoyQuicClientSession::CreateIncomingStream(quic::PendingStream* /*pending*/) { - // Disallow server initiated stream. + // Envoy doesn't support server push. NOT_REACHED_GCOVR_EXCL_LINE; } @@ -114,25 +112,10 @@ quic::QuicConnection* EnvoyQuicClientSession::quicConnection() { } void EnvoyQuicClientSession::OnTlsHandshakeComplete() { + quic::QuicSpdyClientSession::OnTlsHandshakeComplete(); raiseConnectionEvent(Network::ConnectionEvent::Connected); } -size_t EnvoyQuicClientSession::WriteHeadersOnHeadersStream( - quic::QuicStreamId id, spdy::SpdyHeaderBlock headers, bool fin, - const spdy::SpdyStreamPrecedence& precedence, - quic::QuicReferenceCountedPointer ack_listener) { - ASSERT(!quic::VersionUsesHttp3(transport_version())); - // gQUIC headers are sent on a dedicated stream. Only count the bytes sent against - // connection level watermark buffer. Do not count them into stream level - // watermark buffer, because it is impossible to identify which byte belongs - // to which stream when the buffered bytes are drained in headers stream. - // This updater may be in the scope of another one in OnCanWrite(), in such - // case, this one doesn't update the watermark. 
- SendBufferMonitor::ScopedWatermarkBufferUpdater updater(headers_stream(), this); - return quic::QuicSpdyClientSession::WriteHeadersOnHeadersStream(id, std::move(headers), fin, - precedence, ack_listener); -} - std::unique_ptr EnvoyQuicClientSession::CreateQuicCryptoStream() { return crypto_stream_factory_.createEnvoyQuicCryptoClientStream( server_id(), this, crypto_config()->proof_verifier()->CreateDefaultContext(), crypto_config(), diff --git a/source/common/quic/envoy_quic_client_session.h b/source/common/quic/envoy_quic_client_session.h index 307a191e36225..def847ea0eb23 100644 --- a/source/common/quic/envoy_quic_client_session.h +++ b/source/common/quic/envoy_quic_client_session.h @@ -64,13 +64,11 @@ class EnvoyQuicClientSession : public QuicFilterManagerConnectionImpl, quic::ConnectionCloseSource source) override; void Initialize() override; void OnCanWrite() override; - void OnGoAway(const quic::QuicGoAwayFrame& frame) override; void OnHttp3GoAway(uint64_t stream_id) override; void OnTlsHandshakeComplete() override; - size_t WriteHeadersOnHeadersStream( - quic::QuicStreamId id, spdy::SpdyHeaderBlock headers, bool fin, - const spdy::SpdyStreamPrecedence& precedence, - quic::QuicReferenceCountedPointer ack_listener) override; + void MaybeSendRstStreamFrame(quic::QuicStreamId id, quic::QuicRstStreamErrorCode error, + quic::QuicStreamOffset bytes_written) override; + void OnRstStream(const quic::QuicRstStreamFrame& frame) override; // quic::QuicSpdyClientSessionBase void SetDefaultEncryptionLevel(quic::EncryptionLevel level) override; diff --git a/source/common/quic/envoy_quic_client_stream.cc b/source/common/quic/envoy_quic_client_stream.cc index 804a39934fd85..a4522b6b65485 100644 --- a/source/common/quic/envoy_quic_client_stream.cc +++ b/source/common/quic/envoy_quic_client_stream.cc @@ -9,7 +9,6 @@ #include "quiche/quic/core/quic_session.h" #include "quiche/quic/core/http/quic_header_list.h" #include "quiche/spdy/core/spdy_header_block.h" -#include 
"source/common/quic/platform/quic_mem_slice_span_impl.h" #if defined(__GNUC__) #pragma GCC diagnostic pop @@ -44,16 +43,6 @@ EnvoyQuicClientStream::EnvoyQuicClientStream( "Send buffer limit should be larger than 8KB."); } -EnvoyQuicClientStream::EnvoyQuicClientStream( - quic::PendingStream* pending, quic::QuicSpdyClientSession* client_session, - quic::StreamType type, Http::Http3::CodecStats& stats, - const envoy::config::core::v3::Http3ProtocolOptions& http3_options) - : quic::QuicSpdyClientStream(pending, client_session, type), - EnvoyQuicStream( - static_cast(GetReceiveWindow().value()), *filterManagerConnection(), - [this]() { runLowWatermarkCallbacks(); }, [this]() { runHighWatermarkCallbacks(); }, - stats, http3_options) {} - Http::Status EnvoyQuicClientStream::encodeHeaders(const Http::RequestHeaderMap& headers, bool end_stream) { // Required headers must be present. This can only happen by some erroneous processing after the @@ -76,6 +65,9 @@ Http::Status EnvoyQuicClientStream::encodeHeaders(const Http::RequestHeaderMap& } } WriteHeaders(std::move(spdy_headers), end_stream, nullptr); + if (local_end_stream_) { + onLocalEndStream(); + } return Http::okStatus(); } @@ -88,13 +80,28 @@ void EnvoyQuicClientStream::encodeData(Buffer::Instance& data, bool end_stream) ASSERT(!local_end_stream_); local_end_stream_ = end_stream; SendBufferMonitor::ScopedWatermarkBufferUpdater updater(this, this); + Buffer::RawSliceVector raw_slices = data.getRawSlices(); + absl::InlinedVector quic_slices; + quic_slices.reserve(raw_slices.size()); + for (auto& slice : raw_slices) { + ASSERT(slice.len_ != 0); + // Move each slice into a stand-alone buffer. + // TODO(danzh): investigate the cost of allocating one buffer per slice. + // If it turns out to be expensive, add a new function to free data in the middle in buffer + // interface and re-design QuicMemSliceImpl. 
+ quic_slices.emplace_back(quic::QuicMemSliceImpl(data, slice.len_)); + } + absl::Span span(quic_slices); // QUIC stream must take all. - WriteBodySlices(quic::QuicMemSliceSpan(quic::QuicMemSliceSpanImpl(data)), end_stream); + WriteBodySlices(span, end_stream); if (data.length() > 0) { // Send buffer didn't take all the data, threshold needs to be adjusted. Reset(quic::QUIC_BAD_APPLICATION_PAYLOAD); return; } + if (local_end_stream_) { + onLocalEndStream(); + } } void EnvoyQuicClientStream::encodeTrailers(const Http::RequestTrailerMap& trailers) { @@ -103,6 +110,7 @@ void EnvoyQuicClientStream::encodeTrailers(const Http::RequestTrailerMap& traile ENVOY_STREAM_LOG(debug, "encodeTrailers: {}.", *this, trailers); ScopedWatermarkBufferUpdater updater(this, this); WriteTrailers(envoyHeadersToSpdyHeaderBlock(trailers), nullptr); + onLocalEndStream(); } void EnvoyQuicClientStream::encodeMetadata(const Http::MetadataMapVector& /*metadata_map_vector*/) { @@ -145,11 +153,14 @@ void EnvoyQuicClientStream::OnInitialHeadersComplete(bool fin, size_t frame_len, if (fin) { end_stream_decoded_ = true; } + + quic::QuicRstStreamErrorCode transform_rst = quic::QUIC_STREAM_NO_ERROR; std::unique_ptr headers = quicHeadersToEnvoyHeaders( - header_list, *this, filterManagerConnection()->maxIncomingHeadersCount(), details_); + header_list, *this, filterManagerConnection()->maxIncomingHeadersCount(), details_, + transform_rst); if (headers == nullptr) { - onStreamError(close_connection_upon_invalid_header_, quic::QUIC_STREAM_EXCESSIVE_LOAD); + onStreamError(close_connection_upon_invalid_header_, transform_rst); return; } const absl::optional optional_status = @@ -244,13 +255,15 @@ void EnvoyQuicClientStream::maybeDecodeTrailers() { if (sequencer()->IsClosed() && !FinishedReadingTrailers()) { // Only decode trailers after finishing decoding body. 
end_stream_decoded_ = true; - if (received_trailers().size() > filterManagerConnection()->maxIncomingHeadersCount()) { - details_ = Http3ResponseCodeDetailValues::too_many_trailers; - onStreamError(close_connection_upon_invalid_header_, quic::QUIC_STREAM_EXCESSIVE_LOAD); + quic::QuicRstStreamErrorCode transform_rst = quic::QUIC_STREAM_NO_ERROR; + auto trailers = spdyHeaderBlockToEnvoyTrailers( + received_trailers(), filterManagerConnection()->maxIncomingHeadersCount(), *this, details_, + transform_rst); + if (trailers == nullptr) { + onStreamError(close_connection_upon_invalid_header_, transform_rst); return; } - response_decoder_->decodeTrailers( - spdyHeaderBlockToEnvoyHeaders(received_trailers())); + response_decoder_->decodeTrailers(std::move(trailers)); MarkTrailersConsumed(); } } @@ -281,6 +294,7 @@ void EnvoyQuicClientStream::OnConnectionClosed(quic::QuicErrorCode error, } void EnvoyQuicClientStream::OnClose() { + destroy(); quic::QuicSpdyClientStream::OnClose(); if (isDoingWatermarkAccounting()) { // This is called in the scope of a watermark buffer updater. 
Clear the @@ -331,5 +345,7 @@ void EnvoyQuicClientStream::onStreamError(absl::optional should_close_conn } } +bool EnvoyQuicClientStream::hasPendingData() { return BufferedDataBytes() > 0; } + } // namespace Quic } // namespace Envoy diff --git a/source/common/quic/envoy_quic_client_stream.h b/source/common/quic/envoy_quic_client_stream.h index 0e82d8622319e..89b35d8e51650 100644 --- a/source/common/quic/envoy_quic_client_stream.h +++ b/source/common/quic/envoy_quic_client_stream.h @@ -27,9 +27,6 @@ class EnvoyQuicClientStream : public quic::QuicSpdyClientStream, EnvoyQuicClientStream(quic::QuicStreamId id, quic::QuicSpdyClientSession* client_session, quic::StreamType type, Http::Http3::CodecStats& stats, const envoy::config::core::v3::Http3ProtocolOptions& http3_options); - EnvoyQuicClientStream(quic::PendingStream* pending, quic::QuicSpdyClientSession* client_session, - quic::StreamType type, Http::Http3::CodecStats& stats, - const envoy::config::core::v3::Http3ProtocolOptions& http3_options); void setResponseDecoder(Http::ResponseDecoder& decoder) { response_decoder_ = &decoder; } @@ -49,9 +46,6 @@ class EnvoyQuicClientStream : public quic::QuicSpdyClientStream, void resetStream(Http::StreamResetReason reason) override; void setFlushTimeout(std::chrono::milliseconds) override {} - void setAccount(Buffer::BufferMemoryAccountSharedPtr) override { - // TODO(kbaichoo): implement account tracking for QUIC. 
- } // quic::QuicSpdyStream void OnBodyAvailable() override; void OnStreamReset(const quic::QuicRstStreamFrame& frame) override; @@ -76,6 +70,9 @@ class EnvoyQuicClientStream : public quic::QuicSpdyClientStream, void OnTrailingHeadersComplete(bool fin, size_t frame_len, const quic::QuicHeaderList& header_list) override; + // Http::MultiplexedStreamImplBase + bool hasPendingData() override; + private: QuicFilterManagerConnectionImpl* filterManagerConnection(); diff --git a/source/common/quic/envoy_quic_dispatcher.h b/source/common/quic/envoy_quic_dispatcher.h index 0864a909f42ff..b429e908d11be 100644 --- a/source/common/quic/envoy_quic_dispatcher.h +++ b/source/common/quic/envoy_quic_dispatcher.h @@ -25,21 +25,16 @@ namespace Envoy { namespace Quic { -// Envoy specific provider of server connection id and decision maker of -// accepting new connection or not. +// Dummy implementation only used by Google Quic. class EnvoyQuicCryptoServerStreamHelper : public quic::QuicCryptoServerStreamBase::Helper { public: - ~EnvoyQuicCryptoServerStreamHelper() override = default; - // quic::QuicCryptoServerStream::Helper bool CanAcceptClientHello(const quic::CryptoHandshakeMessage& /*message*/, const quic::QuicSocketAddress& /*client_address*/, const quic::QuicSocketAddress& /*peer_address*/, const quic::QuicSocketAddress& /*self_address*/, std::string* /*error_details*/) const override { - // TODO(danzh): decide to accept or not based on information from given handshake message, i.e. - // user agent and SNI. 
- return true; + NOT_REACHED_GCOVR_EXCL_LINE; } }; diff --git a/source/common/quic/envoy_quic_proof_source.cc b/source/common/quic/envoy_quic_proof_source.cc index c9ee45eb12000..67d9e0ce3ce55 100644 --- a/source/common/quic/envoy_quic_proof_source.cc +++ b/source/common/quic/envoy_quic_proof_source.cc @@ -28,8 +28,24 @@ EnvoyQuicProofSource::GetCertChain(const quic::QuicSocketAddress& server_address const std::string& chain_str = cert_config.certificateChain(); std::stringstream pem_stream(chain_str); std::vector chain = quic::CertificateView::LoadPemFromStream(&pem_stream); - return quic::QuicReferenceCountedPointer( + + quic::QuicReferenceCountedPointer cert_chain( new quic::ProofSource::Chain(chain)); + std::string error_details; + bssl::UniquePtr cert = parseDERCertificate(cert_chain->certs[0], &error_details); + if (cert == nullptr) { + ENVOY_LOG(warn, absl::StrCat("Invalid leaf cert: ", error_details)); + return nullptr; + } + + bssl::UniquePtr pub_key(X509_get_pubkey(cert.get())); + int sign_alg = deduceSignatureAlgorithmFromPublicKey(pub_key.get(), &error_details); + if (sign_alg == 0) { + ENVOY_LOG(warn, absl::StrCat("Failed to deduce signature algorithm from public key: ", + error_details)); + return nullptr; + } + return cert_chain; } void EnvoyQuicProofSource::signPayload( @@ -82,7 +98,7 @@ EnvoyQuicProofSource::getTlsCertConfigAndFilterChain(const quic::QuicSocketAddre ENVOY_LOG(trace, "Getting cert chain for {}", hostname); // TODO(danzh) modify QUICHE to make quic session or ALPN accessible to avoid hard-coded ALPN. 
Network::ConnectionSocketPtr connection_socket = createServerConnectionSocket( - listen_socket_.ioHandle(), server_address, client_address, hostname, "h3-29"); + listen_socket_.ioHandle(), server_address, client_address, hostname, "h3"); const Network::FilterChain* filter_chain = filter_chain_manager_.findFilterChain(*connection_socket); diff --git a/source/common/quic/envoy_quic_proof_source_base.cc b/source/common/quic/envoy_quic_proof_source_base.cc index f6adb247d95ed..a59adcb246def 100644 --- a/source/common/quic/envoy_quic_proof_source_base.cc +++ b/source/common/quic/envoy_quic_proof_source_base.cc @@ -16,59 +16,20 @@ namespace Envoy { namespace Quic { -void EnvoyQuicProofSourceBase::GetProof(const quic::QuicSocketAddress& server_address, - const quic::QuicSocketAddress& client_address, - const std::string& hostname, - const std::string& server_config, +void EnvoyQuicProofSourceBase::GetProof(const quic::QuicSocketAddress& /*server_address*/, + const quic::QuicSocketAddress& /*client_address*/, + const std::string& /*hostname*/, + const std::string& /*server_config*/, quic::QuicTransportVersion /*transport_version*/, - absl::string_view chlo_hash, - std::unique_ptr callback) { - quic::QuicReferenceCountedPointer chain = - GetCertChain(server_address, client_address, hostname); - - if (chain == nullptr || chain->certs.empty()) { - quic::QuicCryptoProof proof; - callback->Run(/*ok=*/false, nullptr, proof, nullptr); - return; - } - size_t payload_size = sizeof(quic::kProofSignatureLabel) + sizeof(uint32_t) + chlo_hash.size() + - server_config.size(); - auto payload = std::make_unique(payload_size); - quic::QuicDataWriter payload_writer(payload_size, payload.get(), - quiche::Endianness::HOST_BYTE_ORDER); - bool success = - payload_writer.WriteBytes(quic::kProofSignatureLabel, sizeof(quic::kProofSignatureLabel)) && - payload_writer.WriteUInt32(chlo_hash.size()) && payload_writer.WriteStringPiece(chlo_hash) && - payload_writer.WriteStringPiece(server_config); - if 
(!success) { - quic::QuicCryptoProof proof; - callback->Run(/*ok=*/false, nullptr, proof, nullptr); - return; - } - - std::string error_details; - bssl::UniquePtr cert = parseDERCertificate(chain->certs[0], &error_details); - if (cert == nullptr) { - ENVOY_LOG(warn, absl::StrCat("Invalid leaf cert: ", error_details)); - quic::QuicCryptoProof proof; - callback->Run(/*ok=*/false, nullptr, proof, nullptr); - return; - } - - bssl::UniquePtr pub_key(X509_get_pubkey(cert.get())); - int sign_alg = deduceSignatureAlgorithmFromPublicKey(pub_key.get(), &error_details); - if (sign_alg == 0) { - ENVOY_LOG(warn, absl::StrCat("Failed to deduce signature algorithm from public key: ", - error_details)); - quic::QuicCryptoProof proof; - callback->Run(/*ok=*/false, nullptr, proof, nullptr); - return; - } - - auto signature_callback = std::make_unique(std::move(callback), chain); + absl::string_view /*chlo_hash*/, + std::unique_ptr /*callback*/) { + // Only reachable in Google QUIC which is not supported by Envoy. + NOT_REACHED_GCOVR_EXCL_LINE; +} - signPayload(server_address, client_address, hostname, sign_alg, - absl::string_view(payload.get(), payload_size), std::move(signature_callback)); +absl::InlinedVector EnvoyQuicProofSourceBase::SupportedTlsSignatureAlgorithms() const { + // Return empty here to defer rejecting unexpected algorithm to ComputeTlsSignature(). 
+ return {}; } void EnvoyQuicProofSourceBase::ComputeTlsSignature( diff --git a/source/common/quic/envoy_quic_proof_source_base.h b/source/common/quic/envoy_quic_proof_source_base.h index f963edec26e5a..9988924e71d1e 100644 --- a/source/common/quic/envoy_quic_proof_source_base.h +++ b/source/common/quic/envoy_quic_proof_source_base.h @@ -33,8 +33,6 @@ class EnvoyQuicProofSourceDetails : public quic::ProofSource::Details { public: explicit EnvoyQuicProofSourceDetails(const Network::FilterChain& filter_chain) : filter_chain_(filter_chain) {} - EnvoyQuicProofSourceDetails(const EnvoyQuicProofSourceDetails& other) - : filter_chain_(other.filter_chain_) {} const Network::FilterChain& filterChain() const { return filter_chain_; } @@ -50,8 +48,6 @@ class EnvoyQuicProofSourceBase : public quic::ProofSource, ~EnvoyQuicProofSourceBase() override = default; // quic::ProofSource - // Returns a certs chain and its fake SCT "Fake timestamp" and TLS signature wrapped - // in QuicCryptoProof. void GetProof(const quic::QuicSocketAddress& server_address, const quic::QuicSocketAddress& client_address, const std::string& hostname, const std::string& server_config, quic::QuicTransportVersion /*transport_version*/, @@ -65,6 +61,7 @@ class EnvoyQuicProofSourceBase : public quic::ProofSource, const std::string& hostname, uint16_t signature_algorithm, absl::string_view in, std::unique_ptr callback) override; + absl::InlinedVector SupportedTlsSignatureAlgorithms() const override; protected: virtual void signPayload(const quic::QuicSocketAddress& server_address, @@ -72,33 +69,6 @@ class EnvoyQuicProofSourceBase : public quic::ProofSource, const std::string& hostname, uint16_t signature_algorithm, absl::string_view in, std::unique_ptr callback) PURE; - -private: - // Used by GetProof() to get signature. 
- class SignatureCallback : public quic::ProofSource::SignatureCallback { - public: - // TODO(danzh) Pass in Details to retain the certs chain, and quic::ProofSource::Callback to be - // triggered in Run(). - SignatureCallback(std::unique_ptr callback, - quic::QuicReferenceCountedPointer chain) - : callback_(std::move(callback)), chain_(chain) {} - - // quic::ProofSource::SignatureCallback - void Run(bool ok, std::string signature, std::unique_ptr
details) override { - quic::QuicCryptoProof proof; - if (!ok) { - callback_->Run(false, chain_, proof, nullptr); - return; - } - proof.signature = signature; - proof.leaf_cert_scts = "Fake timestamp"; - callback_->Run(true, chain_, proof, std::move(details)); - } - - private: - std::unique_ptr callback_; - quic::QuicReferenceCountedPointer chain_; - }; }; } // namespace Quic diff --git a/source/common/quic/envoy_quic_proof_verifier.cc b/source/common/quic/envoy_quic_proof_verifier.cc index 2a4b80c76e1aa..4a2c3245180dd 100644 --- a/source/common/quic/envoy_quic_proof_verifier.cc +++ b/source/common/quic/envoy_quic_proof_verifier.cc @@ -28,6 +28,13 @@ quic::QuicAsyncStatus EnvoyQuicProofVerifier::VerifyCertChain( sk_X509_push(intermediates.get(), cert.release()); } } + std::unique_ptr cert_view = + quic::CertificateView::ParseSingleCertificate(certs[0]); + ASSERT(cert_view != nullptr); + int sign_alg = deduceSignatureAlgorithmFromPublicKey(cert_view->public_key(), error_details); + if (sign_alg == 0) { + return quic::QUIC_FAILURE; + } // We down cast rather than add verifyCertChain to Envoy::Ssl::Context because // verifyCertChain uses a bunch of SSL-specific structs which we want to keep // out of the interface definition. 
@@ -37,9 +44,6 @@ quic::QuicAsyncStatus EnvoyQuicProofVerifier::VerifyCertChain( return quic::QUIC_FAILURE; } - std::unique_ptr cert_view = - quic::CertificateView::ParseSingleCertificate(certs[0]); - ASSERT(cert_view != nullptr); for (const absl::string_view& config_san : cert_view->subject_alt_name_domains()) { if (Extensions::TransportSockets::Tls::DefaultCertValidator::dnsNameMatch(hostname, config_san)) { diff --git a/source/common/quic/envoy_quic_proof_verifier_base.cc b/source/common/quic/envoy_quic_proof_verifier_base.cc index 7f791b0c3f103..0dca3b23c3143 100644 --- a/source/common/quic/envoy_quic_proof_verifier_base.cc +++ b/source/common/quic/envoy_quic_proof_verifier_base.cc @@ -11,59 +11,14 @@ namespace Envoy { namespace Quic { quic::QuicAsyncStatus EnvoyQuicProofVerifierBase::VerifyProof( - const std::string& hostname, const uint16_t port, const std::string& server_config, - quic::QuicTransportVersion /*quic_version*/, absl::string_view chlo_hash, - const std::vector& certs, const std::string& cert_sct, - const std::string& signature, const quic::ProofVerifyContext* context, - std::string* error_details, std::unique_ptr* details, - std::unique_ptr callback) { - if (certs.empty()) { - *error_details = "Received empty cert chain."; - return quic::QUIC_FAILURE; - } - if (!verifySignature(server_config, chlo_hash, certs[0], signature, error_details)) { - return quic::QUIC_FAILURE; - } - - return VerifyCertChain(hostname, port, certs, "", cert_sct, context, error_details, details, - nullptr, std::move(callback)); -} - -bool EnvoyQuicProofVerifierBase::verifySignature(const std::string& server_config, - absl::string_view chlo_hash, - const std::string& cert, - const std::string& signature, - std::string* error_details) { - std::unique_ptr cert_view = - quic::CertificateView::ParseSingleCertificate(cert); - if (cert_view == nullptr) { - *error_details = "Invalid leaf cert."; - return false; - } - int sign_alg = 
deduceSignatureAlgorithmFromPublicKey(cert_view->public_key(), error_details); - if (sign_alg == 0) { - return false; - } - - size_t payload_size = sizeof(quic::kProofSignatureLabel) + sizeof(uint32_t) + chlo_hash.size() + - server_config.size(); - auto payload = std::make_unique(payload_size); - quic::QuicDataWriter payload_writer(payload_size, payload.get(), - quiche::Endianness::HOST_BYTE_ORDER); - bool success = - payload_writer.WriteBytes(quic::kProofSignatureLabel, sizeof(quic::kProofSignatureLabel)) && - payload_writer.WriteUInt32(chlo_hash.size()) && payload_writer.WriteStringPiece(chlo_hash) && - payload_writer.WriteStringPiece(server_config); - if (!success) { - *error_details = "QuicPacketWriter error."; - return false; - } - bool valid = cert_view->VerifySignature(absl::string_view(payload.get(), payload_size), signature, - sign_alg); - if (!valid) { - *error_details = "Signature is not valid."; - } - return valid; + const std::string& /*hostname*/, const uint16_t /*port*/, const std::string& /*server_config*/, + quic::QuicTransportVersion /*quic_version*/, absl::string_view /*chlo_hash*/, + const std::vector& /*certs*/, const std::string& /*cert_sct*/, + const std::string& /*signature*/, const quic::ProofVerifyContext* /*context*/, + std::string* /*error_details*/, std::unique_ptr* /*details*/, + std::unique_ptr /*callback*/) { + // Only reachable in Google QUIC which is not supported by Envoy. 
+ NOT_REACHED_GCOVR_EXCL_LINE; } } // namespace Quic diff --git a/source/common/quic/envoy_quic_proof_verifier_base.h b/source/common/quic/envoy_quic_proof_verifier_base.h index e3835b917ddae..45bdd2a4161b7 100644 --- a/source/common/quic/envoy_quic_proof_verifier_base.h +++ b/source/common/quic/envoy_quic_proof_verifier_base.h @@ -27,8 +27,6 @@ class EnvoyQuicProofVerifierBase : public quic::ProofVerifier, ~EnvoyQuicProofVerifierBase() override = default; // quic::ProofVerifier - // Return success if the certs chain is valid and signature of { - // server_config + chlo_hash} is valid. Otherwise failure. quic::QuicAsyncStatus VerifyProof(const std::string& hostname, const uint16_t port, const std::string& server_config, quic::QuicTransportVersion /*quic_version*/, absl::string_view chlo_hash, @@ -38,11 +36,6 @@ class EnvoyQuicProofVerifierBase : public quic::ProofVerifier, std::unique_ptr callback) override; std::unique_ptr CreateDefaultContext() override { return nullptr; } - -protected: - virtual bool verifySignature(const std::string& server_config, absl::string_view chlo_hash, - const std::string& cert, const std::string& signature, - std::string* error_details); }; } // namespace Quic diff --git a/source/common/quic/envoy_quic_server_connection.cc b/source/common/quic/envoy_quic_server_connection.cc index f241e7ec17da5..963032e041f00 100644 --- a/source/common/quic/envoy_quic_server_connection.cc +++ b/source/common/quic/envoy_quic_server_connection.cc @@ -29,7 +29,7 @@ bool EnvoyQuicServerConnection::OnPacketHeader(const quic::QuicPacketHeader& hea } // Update local address if QUICHE has updated the self address. 
ASSERT(self_address().IsInitialized()); - connectionSocket()->addressProvider().setLocalAddress( + connectionSocket()->connectionInfoProvider().setLocalAddress( quicAddressToEnvoyAddressInstance(self_address())); return true; diff --git a/source/common/quic/envoy_quic_server_session.cc b/source/common/quic/envoy_quic_server_session.cc index ca373f22547a1..15d1d28745dd3 100644 --- a/source/common/quic/envoy_quic_server_session.cc +++ b/source/common/quic/envoy_quic_server_session.cc @@ -98,26 +98,12 @@ void EnvoyQuicServerSession::Initialize() { } void EnvoyQuicServerSession::OnCanWrite() { - if (quic::VersionUsesHttp3(transport_version())) { - quic::QuicServerSessionBase::OnCanWrite(); - } else { - SendBufferMonitor::ScopedWatermarkBufferUpdater updater(headers_stream(), this); - quic::QuicServerSessionBase::OnCanWrite(); - } + quic::QuicServerSessionBase::OnCanWrite(); // Do not update delay close state according to connection level packet egress because that is // equivalent to TCP transport layer egress. But only do so if the session gets chance to write. maybeApplyDelayClosePolicy(); } -void EnvoyQuicServerSession::SetDefaultEncryptionLevel(quic::EncryptionLevel level) { - quic::QuicServerSessionBase::SetDefaultEncryptionLevel(level); - if (level != quic::ENCRYPTION_FORWARD_SECURE) { - return; - } - // This is only reached once, when handshake is done. 
- raiseConnectionEvent(Network::ConnectionEvent::Connected); -} - bool EnvoyQuicServerSession::hasDataToWrite() { return HasDataToWrite(); } const quic::QuicConnection* EnvoyQuicServerSession::quicConnection() const { @@ -133,22 +119,6 @@ void EnvoyQuicServerSession::OnTlsHandshakeComplete() { raiseConnectionEvent(Network::ConnectionEvent::Connected); } -size_t EnvoyQuicServerSession::WriteHeadersOnHeadersStream( - quic::QuicStreamId id, spdy::SpdyHeaderBlock headers, bool fin, - const spdy::SpdyStreamPrecedence& precedence, - quic::QuicReferenceCountedPointer ack_listener) { - ASSERT(!quic::VersionUsesHttp3(transport_version())); - // gQUIC headers are sent on a dedicated stream. Only count the bytes sent against - // connection level watermark buffer. Do not count them into stream level - // watermark buffer, because it is impossible to identify which byte belongs - // to which stream when the buffered bytes are drained in headers stream. - // This updater may be in the scope of another one in OnCanWrite(), in such - // case, this one doesn't update the watermark. 
- SendBufferMonitor::ScopedWatermarkBufferUpdater updater(headers_stream(), this); - return quic::QuicServerSessionBase::WriteHeadersOnHeadersStream(id, std::move(headers), fin, - precedence, ack_listener); -} - void EnvoyQuicServerSession::MaybeSendRstStreamFrame(quic::QuicStreamId id, quic::QuicRstStreamErrorCode error, quic::QuicStreamOffset bytes_written) { diff --git a/source/common/quic/envoy_quic_server_session.h b/source/common/quic/envoy_quic_server_session.h index 9e6bedb7aac21..a3a804023aef9 100644 --- a/source/common/quic/envoy_quic_server_session.h +++ b/source/common/quic/envoy_quic_server_session.h @@ -70,12 +70,6 @@ class EnvoyQuicServerSession : public quic::QuicServerSessionBase, void MaybeSendRstStreamFrame(quic::QuicStreamId id, quic::QuicRstStreamErrorCode error, quic::QuicStreamOffset bytes_written) override; void OnRstStream(const quic::QuicRstStreamFrame& frame) override; - // quic::QuicSpdySession - void SetDefaultEncryptionLevel(quic::EncryptionLevel level) override; - size_t WriteHeadersOnHeadersStream( - quic::QuicStreamId id, spdy::SpdyHeaderBlock headers, bool fin, - const spdy::SpdyStreamPrecedence& precedence, - quic::QuicReferenceCountedPointer ack_listener) override; void setHeadersWithUnderscoreAction( envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction diff --git a/source/common/quic/envoy_quic_server_stream.cc b/source/common/quic/envoy_quic_server_stream.cc index aa100414b93be..f5249c30375c1 100644 --- a/source/common/quic/envoy_quic_server_stream.cc +++ b/source/common/quic/envoy_quic_server_stream.cc @@ -14,7 +14,6 @@ #include "quiche/quic/core/http/quic_header_list.h" #include "quiche/quic/core/quic_session.h" #include "quiche/spdy/core/spdy_header_block.h" -#include "source/common/quic/platform/quic_mem_slice_span_impl.h" #if defined(__GNUC__) #pragma GCC diagnostic pop @@ -49,22 +48,6 @@ EnvoyQuicServerStream::EnvoyQuicServerStream( "Send buffer limit should be larger than 8KB."); } 
-EnvoyQuicServerStream::EnvoyQuicServerStream( - quic::PendingStream* pending, quic::QuicSpdySession* session, quic::StreamType type, - Http::Http3::CodecStats& stats, - const envoy::config::core::v3::Http3ProtocolOptions& http3_options, - envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction - headers_with_underscores_action) - : quic::QuicSpdyServerStreamBase(pending, session, type), - EnvoyQuicStream( - // This should be larger than 8k to fully utilize congestion control - // window. And no larger than the max stream flow control window for - // the stream to buffer all the data. - static_cast(GetReceiveWindow().value()), *filterManagerConnection(), - [this]() { runLowWatermarkCallbacks(); }, [this]() { runHighWatermarkCallbacks(); }, - stats, http3_options), - headers_with_underscores_action_(headers_with_underscores_action) {} - void EnvoyQuicServerStream::encode100ContinueHeaders(const Http::ResponseHeaderMap& headers) { ASSERT(headers.Status()->value() == "100"); encodeHeaders(headers, false); @@ -72,18 +55,13 @@ void EnvoyQuicServerStream::encode100ContinueHeaders(const Http::ResponseHeaderM void EnvoyQuicServerStream::encodeHeaders(const Http::ResponseHeaderMap& headers, bool end_stream) { ENVOY_STREAM_LOG(debug, "encodeHeaders (end_stream={}) {}.", *this, end_stream, headers); - // QUICHE guarantees to take all the headers. This could cause infinite data to - // be buffered on headers stream in Google QUIC implementation because - // headers stream doesn't have upper bound for its send buffer. But in IETF - // QUIC implementation this is safe as headers are sent on data stream which - // is bounded by max concurrent streams limited. - // Same vulnerability exists in crypto stream which can infinitely buffer data - // if handshake implementation goes wrong. - // TODO(#8826) Modify QUICHE to have an upper bound for header stream send buffer. // This is counting not serialized bytes in the send buffer. 
local_end_stream_ = end_stream; SendBufferMonitor::ScopedWatermarkBufferUpdater updater(this, this); WriteHeaders(envoyHeadersToSpdyHeaderBlock(headers), end_stream, nullptr); + if (local_end_stream_) { + onLocalEndStream(); + } } void EnvoyQuicServerStream::encodeData(Buffer::Instance& data, bool end_stream) { @@ -95,13 +73,28 @@ void EnvoyQuicServerStream::encodeData(Buffer::Instance& data, bool end_stream) ASSERT(!local_end_stream_); local_end_stream_ = end_stream; SendBufferMonitor::ScopedWatermarkBufferUpdater updater(this, this); + Buffer::RawSliceVector raw_slices = data.getRawSlices(); + absl::InlinedVector quic_slices; + quic_slices.reserve(raw_slices.size()); + for (auto& slice : raw_slices) { + ASSERT(slice.len_ != 0); + // Move each slice into a stand-alone buffer. + // TODO(danzh): investigate the cost of allocating one buffer per slice. + // If it turns out to be expensive, add a new function to free data in the middle in buffer + // interface and re-design QuicMemSliceImpl. + quic_slices.emplace_back(quic::QuicMemSliceImpl(data, slice.len_)); + } + absl::Span span(quic_slices); // QUIC stream must take all. - WriteBodySlices(quic::QuicMemSliceSpan(quic::QuicMemSliceSpanImpl(data)), end_stream); + WriteBodySlices(span, end_stream); if (data.length() > 0) { // Send buffer didn't take all the data, threshold needs to be adjusted. 
Reset(quic::QUIC_BAD_APPLICATION_PAYLOAD); return; } + if (local_end_stream_) { + onLocalEndStream(); + } } void EnvoyQuicServerStream::encodeTrailers(const Http::ResponseTrailerMap& trailers) { @@ -110,6 +103,7 @@ void EnvoyQuicServerStream::encodeTrailers(const Http::ResponseTrailerMap& trail ENVOY_STREAM_LOG(debug, "encodeTrailers: {}.", *this, trailers); SendBufferMonitor::ScopedWatermarkBufferUpdater updater(this, this); WriteTrailers(envoyHeadersToSpdyHeaderBlock(trailers), nullptr); + onLocalEndStream(); } void EnvoyQuicServerStream::encodeMetadata(const Http::MetadataMapVector& /*metadata_map_vector*/) { @@ -119,6 +113,10 @@ void EnvoyQuicServerStream::encodeMetadata(const Http::MetadataMapVector& /*meta } void EnvoyQuicServerStream::resetStream(Http::StreamResetReason reason) { + if (buffer_memory_account_) { + buffer_memory_account_->clearDownstream(); + } + if (local_end_stream_ && !reading_stopped()) { // This is after 200 early response. Reset with QUIC_STREAM_NO_ERROR instead // of propagating original reset reason. In QUICHE if a stream stops reading @@ -161,11 +159,12 @@ void EnvoyQuicServerStream::OnInitialHeadersComplete(bool fin, size_t frame_len, if (fin) { end_stream_decoded_ = true; } + quic::QuicRstStreamErrorCode rst = quic::QUIC_STREAM_NO_ERROR; std::unique_ptr headers = quicHeadersToEnvoyHeaders( - header_list, *this, filterManagerConnection()->maxIncomingHeadersCount(), details_); + header_list, *this, filterManagerConnection()->maxIncomingHeadersCount(), details_, rst); if (headers == nullptr) { - onStreamError(close_connection_upon_invalid_header_); + onStreamError(close_connection_upon_invalid_header_, rst); return; } if (Http::HeaderUtility::requestHeadersValid(*headers) != absl::nullopt) { @@ -245,13 +244,15 @@ void EnvoyQuicServerStream::maybeDecodeTrailers() { if (sequencer()->IsClosed() && !FinishedReadingTrailers()) { // Only decode trailers after finishing decoding body. 
end_stream_decoded_ = true; - if (received_trailers().size() > filterManagerConnection()->maxIncomingHeadersCount()) { - details_ = Http3ResponseCodeDetailValues::too_many_trailers; - onStreamError(close_connection_upon_invalid_header_); + quic::QuicRstStreamErrorCode rst = quic::QUIC_STREAM_NO_ERROR; + auto trailers = spdyHeaderBlockToEnvoyTrailers( + received_trailers(), filterManagerConnection()->maxIncomingHeadersCount(), *this, details_, + rst); + if (trailers == nullptr) { + onStreamError(close_connection_upon_invalid_header_, rst); return; } - request_decoder_->decodeTrailers( - spdyHeaderBlockToEnvoyHeaders(received_trailers())); + request_decoder_->decodeTrailers(std::move(trailers)); MarkTrailersConsumed(); } } @@ -262,22 +263,27 @@ bool EnvoyQuicServerStream::OnStopSending(quic::QuicRstStreamErrorCode error) { stats_.rx_reset_.inc(); bool end_stream_encoded = local_end_stream_; // This call will close write. - bool ret = quic::QuicSpdyServerStreamBase::OnStopSending(error); + if (!quic::QuicSpdyServerStreamBase::OnStopSending(error)) { + return false; + } ASSERT(write_side_closed()); - if (read_side_closed() && !end_stream_encoded) { + // Also stop reading because the peer already didn't care about the response any more. + if (!reading_stopped()) { + StopReading(); + } + if (!end_stream_encoded) { // If both directions are closed but end stream hasn't been encoded yet, notify reset callbacks. // Treat this as a remote reset, since the stream will be closed in both directions. runResetCallbacks(quicRstErrorToEnvoyRemoteResetReason(error)); } - return ret; + return true; } void EnvoyQuicServerStream::OnStreamReset(const quic::QuicRstStreamFrame& frame) { ENVOY_STREAM_LOG(debug, "received RESET_STREAM with reset code={}", *this, frame.error_code); stats_.rx_reset_.inc(); bool end_stream_decoded_and_encoded = read_side_closed() && local_end_stream_; - // This closes read side in both Google Quic and IETF Quic, but doesn't close write side in IETF - // Quic. 
+ // This closes read side in IETF Quic, but doesn't close write side. quic::QuicSpdyServerStreamBase::OnStreamReset(frame); ASSERT(read_side_closed()); if (write_side_closed() && !end_stream_decoded_and_encoded) { @@ -290,8 +296,10 @@ void EnvoyQuicServerStream::OnStreamReset(const quic::QuicRstStreamFrame& frame) void EnvoyQuicServerStream::Reset(quic::QuicRstStreamErrorCode error) { ENVOY_STREAM_LOG(debug, "sending reset code={}", *this, error); stats_.tx_reset_.inc(); - // Upper layers expect calling resetStream() to immediately raise reset callbacks. - runResetCallbacks(quicRstErrorToEnvoyLocalResetReason(error)); + if (!local_end_stream_) { + // Upper layers expect calling resetStream() to immediately raise reset callbacks. + runResetCallbacks(quicRstErrorToEnvoyLocalResetReason(error)); + } quic::QuicSpdyServerStreamBase::Reset(error); } @@ -307,7 +315,23 @@ void EnvoyQuicServerStream::OnConnectionClosed(quic::QuicErrorCode error, quic::QuicSpdyServerStreamBase::OnConnectionClosed(error, source); } +void EnvoyQuicServerStream::CloseWriteSide() { + // Clear the downstream since the stream should not write additional data + // after this is called, e.g. cannot reset the stream. + // Only the downstream stream should clear the downstream of the + // memory account. + // + // There are cases where a corresponding upstream stream dtor might + // be called, but the downstream stream isn't going to terminate soon + // such as StreamDecoderFilterCallbacks::recreateStream(). 
+ if (buffer_memory_account_) { + buffer_memory_account_->clearDownstream(); + } + quic::QuicSpdyServerStreamBase::CloseWriteSide(); +} + void EnvoyQuicServerStream::OnClose() { + destroy(); quic::QuicSpdyServerStreamBase::OnClose(); if (isDoingWatermarkAccounting()) { return; @@ -337,7 +361,7 @@ QuicFilterManagerConnectionImpl* EnvoyQuicServerStream::filterManagerConnection( } Http::HeaderUtility::HeaderValidationResult -EnvoyQuicServerStream::validateHeader(const std::string& header_name, +EnvoyQuicServerStream::validateHeader(absl::string_view header_name, absl::string_view header_value) { Http::HeaderUtility::HeaderValidationResult result = EnvoyQuicStream::validateHeader(header_name, header_value); @@ -354,7 +378,8 @@ EnvoyQuicServerStream::validateHeader(const std::string& header_name, return result; } -void EnvoyQuicServerStream::onStreamError(absl::optional should_close_connection) { +void EnvoyQuicServerStream::onStreamError(absl::optional should_close_connection, + quic::QuicRstStreamErrorCode rst) { if (details_.empty()) { details_ = Http3ResponseCodeDetailValues::invalid_http_header; } @@ -369,9 +394,25 @@ void EnvoyQuicServerStream::onStreamError(absl::optional should_close_conn if (close_connection_upon_invalid_header) { stream_delegate()->OnStreamError(quic::QUIC_HTTP_FRAME_ERROR, "Invalid headers"); } else { - Reset(quic::QUIC_BAD_APPLICATION_PAYLOAD); + Reset(rst); } } +void EnvoyQuicServerStream::onPendingFlushTimer() { + ENVOY_STREAM_LOG(debug, "pending stream flush timeout", *this); + Http::MultiplexedStreamImplBase::onPendingFlushTimer(); + stats_.tx_flush_timeout_.inc(); + ASSERT(local_end_stream_ && !fin_sent()); + // Reset the stream locally. But no reset callbacks will be run because higher layers think the + // stream is already finished. 
+ Reset(quic::QUIC_STREAM_CANCELLED); +} + +bool EnvoyQuicServerStream::hasPendingData() { + // Quic stream sends headers and trailers on the same stream, and buffers them in the same sending + // buffer if needed. So checking this buffer is sufficient. + return BufferedDataBytes() > 0; +} + } // namespace Quic } // namespace Envoy diff --git a/source/common/quic/envoy_quic_server_stream.h b/source/common/quic/envoy_quic_server_stream.h index e0b98e835fdad..45aecf8843f35 100644 --- a/source/common/quic/envoy_quic_server_stream.h +++ b/source/common/quic/envoy_quic_server_stream.h @@ -28,12 +28,6 @@ class EnvoyQuicServerStream : public quic::QuicSpdyServerStreamBase, envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headers_with_underscores_action); - EnvoyQuicServerStream(quic::PendingStream* pending, quic::QuicSpdySession* session, - quic::StreamType type, Http::Http3::CodecStats& stats, - const envoy::config::core::v3::Http3ProtocolOptions& http3_options, - envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction - headers_with_underscores_action); - void setRequestDecoder(Http::RequestDecoder& decoder) { request_decoder_ = &decoder; } // Http::StreamEncoder @@ -51,13 +45,7 @@ class EnvoyQuicServerStream : public quic::QuicSpdyServerStreamBase, // Http::Stream void resetStream(Http::StreamResetReason reason) override; - void setFlushTimeout(std::chrono::milliseconds) override { - // TODO(mattklein123): Actually implement this for HTTP/3 similar to HTTP/2. - } - void setAccount(Buffer::BufferMemoryAccountSharedPtr) override { - // TODO(kbaichoo): implement account tracking for QUIC. 
- } // quic::QuicSpdyStream void OnBodyAvailable() override; bool OnStopSending(quic::QuicRstStreamErrorCode error) override; @@ -67,12 +55,13 @@ class EnvoyQuicServerStream : public quic::QuicSpdyServerStreamBase, void OnCanWrite() override; // quic::QuicSpdyServerStreamBase void OnConnectionClosed(quic::QuicErrorCode error, quic::ConnectionCloseSource source) override; + void CloseWriteSide() override; void clearWatermarkBuffer(); // EnvoyQuicStream Http::HeaderUtility::HeaderValidationResult - validateHeader(const std::string& header_name, absl::string_view header_value) override; + validateHeader(absl::string_view header_name, absl::string_view header_value) override; protected: // EnvoyQuicStream @@ -87,6 +76,10 @@ class EnvoyQuicServerStream : public quic::QuicSpdyServerStreamBase, const quic::QuicHeaderList& header_list) override; void OnHeadersTooLarge() override; + // Http::MultiplexedStreamImplBase + void onPendingFlushTimer() override; + bool hasPendingData() override; + private: QuicFilterManagerConnectionImpl* filterManagerConnection(); @@ -95,7 +88,8 @@ class EnvoyQuicServerStream : public quic::QuicSpdyServerStreamBase, // Either reset the stream or close the connection according to // should_close_connection and configured http3 options. 
- void onStreamError(absl::optional should_close_connection); + void onStreamError(absl::optional should_close_connection, + quic::QuicRstStreamErrorCode rst = quic::QUIC_BAD_APPLICATION_PAYLOAD); Http::RequestDecoder* request_decoder_{nullptr}; envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction diff --git a/source/common/quic/envoy_quic_stream.h b/source/common/quic/envoy_quic_stream.h index f28cae4cf0119..4f1669f493efb 100644 --- a/source/common/quic/envoy_quic_stream.h +++ b/source/common/quic/envoy_quic_stream.h @@ -1,5 +1,6 @@ #pragma once +#include "envoy/buffer/buffer.h" #include "envoy/config/core/v3/protocol.pb.h" #include "envoy/event/dispatcher.h" #include "envoy/http/codec.h" @@ -15,8 +16,7 @@ namespace Quic { // Base class for EnvoyQuicServer|ClientStream. class EnvoyQuicStream : public virtual Http::StreamEncoder, - public Http::Stream, - public Http::StreamCallbackHelper, + public Http::MultiplexedStreamImplBase, public SendBufferMonitor, public HeaderValidator, protected Logger::Loggable { @@ -27,7 +27,8 @@ class EnvoyQuicStream : public virtual Http::StreamEncoder, std::function below_low_watermark, std::function above_high_watermark, Http::Http3::CodecStats& stats, const envoy::config::core::v3::Http3ProtocolOptions& http3_options) - : stats_(stats), http3_options_(http3_options), + : Http::MultiplexedStreamImplBase(filter_manager_connection.dispatcher()), stats_(stats), + http3_options_(http3_options), send_buffer_simulation_(buffer_limit / 2, buffer_limit, std::move(below_low_watermark), std::move(above_high_watermark), ENVOY_LOGGER()), filter_manager_connection_(filter_manager_connection), @@ -76,7 +77,11 @@ class EnvoyQuicStream : public virtual Http::StreamEncoder, } uint32_t bufferLimit() override { return send_buffer_simulation_.highWatermark(); } const Network::Address::InstanceConstSharedPtr& connectionLocalAddress() override { - return connection()->addressProvider().localAddress(); + return 
connection()->connectionInfoProvider().localAddress(); + } + + void setAccount(Buffer::BufferMemoryAccountSharedPtr account) override { + buffer_memory_account_ = account; } // SendBufferMonitor @@ -95,9 +100,12 @@ class EnvoyQuicStream : public virtual Http::StreamEncoder, } Http::HeaderUtility::HeaderValidationResult - validateHeader(const std::string& header_name, absl::string_view header_value) override { + validateHeader(absl::string_view header_name, absl::string_view header_value) override { bool override_stream_error_on_invalid_http_message = http3_options_.override_stream_error_on_invalid_http_message().value(); + if (!Http::HeaderUtility::headerValueIsValid(header_value)) { + return Http::HeaderUtility::HeaderValidationResult::REJECT; + } if (header_name == "content-length") { return Http::HeaderUtility::validateContentLength( header_value, override_stream_error_on_invalid_http_message, @@ -130,6 +138,9 @@ class EnvoyQuicStream : public virtual Http::StreamEncoder, const envoy::config::core::v3::Http3ProtocolOptions& http3_options_; bool close_connection_upon_invalid_header_{false}; absl::string_view details_; + // TODO(kbaichoo): bind the account to the QUIC buffers to enable tracking of + // memory allocated within QUIC buffers. 
+ Buffer::BufferMemoryAccountSharedPtr buffer_memory_account_ = nullptr; private: // Keeps track of bytes buffered in the stream send buffer in QUICHE and reacts diff --git a/source/common/quic/envoy_quic_utils.cc b/source/common/quic/envoy_quic_utils.cc index 769f8cf086497..9557a77a2eb81 100644 --- a/source/common/quic/envoy_quic_utils.cc +++ b/source/common/quic/envoy_quic_utils.cc @@ -73,6 +73,7 @@ quic::QuicRstStreamErrorCode envoyResetReasonToQuicRstError(Http::StreamResetRea case Http::StreamResetReason::ConnectionTermination: return quic::QUIC_STREAM_CONNECTION_ERROR; case Http::StreamResetReason::LocalReset: + case Http::StreamResetReason::OverloadManager: return quic::QUIC_STREAM_CANCELLED; default: return quic::QUIC_BAD_APPLICATION_PAYLOAD; @@ -125,15 +126,6 @@ Http::StreamResetReason quicErrorCodeToEnvoyRemoteResetReason(quic::QuicErrorCod } } -Http::GoAwayErrorCode quicErrorCodeToEnvoyErrorCode(quic::QuicErrorCode error) noexcept { - switch (error) { - case quic::QUIC_NO_ERROR: - return Http::GoAwayErrorCode::NoError; - default: - return Http::GoAwayErrorCode::Other; - } -} - Network::ConnectionSocketPtr createConnectionSocket(Network::Address::InstanceConstSharedPtr& peer_addr, Network::Address::InstanceConstSharedPtr& local_addr, @@ -156,7 +148,7 @@ createConnectionSocket(Network::Address::InstanceConstSharedPtr& peer_addr, } connection_socket->bind(local_addr); ASSERT(local_addr->ip()); - local_addr = connection_socket->addressProvider().localAddress(); + local_addr = connection_socket->connectionInfoProvider().localAddress(); if (!Network::Socket::applyOptions(connection_socket->options(), *connection_socket, envoy::config::core::v3::SocketOption::STATE_BOUND)) { ENVOY_LOG_MISC(error, "Fail to apply post-bind options"); diff --git a/source/common/quic/envoy_quic_utils.h b/source/common/quic/envoy_quic_utils.h index 434fd939572c1..b07324b049bcd 100644 --- a/source/common/quic/envoy_quic_utils.h +++ b/source/common/quic/envoy_quic_utils.h @@ -66,18 
+66,20 @@ class HeaderValidator { public: virtual ~HeaderValidator() = default; virtual Http::HeaderUtility::HeaderValidationResult - validateHeader(const std::string& header_name, absl::string_view header_value) = 0; + validateHeader(absl::string_view name, absl::string_view header_value) = 0; }; // The returned header map has all keys in lower case. template std::unique_ptr quicHeadersToEnvoyHeaders(const quic::QuicHeaderList& header_list, HeaderValidator& validator, - uint32_t max_headers_allowed, absl::string_view& details) { + uint32_t max_headers_allowed, absl::string_view& details, + quic::QuicRstStreamErrorCode& rst) { auto headers = T::create(); for (const auto& entry : header_list) { if (max_headers_allowed == 0) { details = Http3ResponseCodeDetailValues::too_many_headers; + rst = quic::QUIC_STREAM_EXCESSIVE_LOAD; return nullptr; } max_headers_allowed--; @@ -85,6 +87,7 @@ quicHeadersToEnvoyHeaders(const quic::QuicHeaderList& header_list, HeaderValidat validator.validateHeader(entry.first, entry.second); switch (result) { case Http::HeaderUtility::HeaderValidationResult::REJECT: + rst = quic::QUIC_BAD_APPLICATION_PAYLOAD; // The validator sets the details to Http3ResponseCodeDetailValues::invalid_underscore return nullptr; case Http::HeaderUtility::HeaderValidationResult::DROP: @@ -105,15 +108,39 @@ quicHeadersToEnvoyHeaders(const quic::QuicHeaderList& header_list, HeaderValidat } template -std::unique_ptr spdyHeaderBlockToEnvoyHeaders(const spdy::SpdyHeaderBlock& header_block) { +std::unique_ptr +spdyHeaderBlockToEnvoyTrailers(const spdy::SpdyHeaderBlock& header_block, + uint32_t max_headers_allowed, HeaderValidator& validator, + absl::string_view& details, quic::QuicRstStreamErrorCode& rst) { auto headers = T::create(); + if (header_block.size() > max_headers_allowed) { + details = Http3ResponseCodeDetailValues::too_many_trailers; + rst = quic::QUIC_STREAM_EXCESSIVE_LOAD; + return nullptr; + } for (auto entry : header_block) { // TODO(danzh): Avoid 
temporary strings and addCopy() with string_view. std::string key(entry.first); // QUICHE coalesces multiple trailer values with the same key with '\0'. std::vector values = absl::StrSplit(entry.second, '\0'); for (const absl::string_view& value : values) { - headers->addCopy(Http::LowerCaseString(key), value); + if (max_headers_allowed == 0) { + details = Http3ResponseCodeDetailValues::too_many_trailers; + rst = quic::QUIC_STREAM_EXCESSIVE_LOAD; + return nullptr; + } + max_headers_allowed--; + Http::HeaderUtility::HeaderValidationResult result = + validator.validateHeader(entry.first, value); + switch (result) { + case Http::HeaderUtility::HeaderValidationResult::REJECT: + rst = quic::QUIC_BAD_APPLICATION_PAYLOAD; + return nullptr; + case Http::HeaderUtility::HeaderValidationResult::DROP: + continue; + case Http::HeaderUtility::HeaderValidationResult::ACCEPT: + headers->addCopy(Http::LowerCaseString(key), value); + } } } return headers; @@ -136,10 +163,6 @@ Http::StreamResetReason quicErrorCodeToEnvoyLocalResetReason(quic::QuicErrorCode // Called when underlying QUIC connection is closed by peer. Http::StreamResetReason quicErrorCodeToEnvoyRemoteResetReason(quic::QuicErrorCode error); -// Called when a GOAWAY frame is received. -ABSL_MUST_USE_RESULT -Http::GoAwayErrorCode quicErrorCodeToEnvoyErrorCode(quic::QuicErrorCode error) noexcept; - // Create a connection socket instance and apply given socket options to the // socket. IP_PKTINFO and SO_RXQ_OVFL is always set if supported. Network::ConnectionSocketPtr diff --git a/source/common/quic/platform/BUILD b/source/common/quic/platform/BUILD index ef8aa550ab179..60fcfe741ae02 100644 --- a/source/common/quic/platform/BUILD +++ b/source/common/quic/platform/BUILD @@ -13,7 +13,7 @@ envoy_package() # used in 2 different ways: # # Most of them are not to be consumed or referenced directly by other Envoy code. 
-# Their only consumers should be build rules under @com_googlesource_quiche//..., +# Their only consumers should be build rules under @com_github_google_quiche//..., # and tests. In a monorepo, this would be enforced via visibility attribute, but # Bazel does not support limiting visibility to specific external dependencies. # @@ -25,8 +25,8 @@ envoy_package() # These implementations are tested through their APIs with tests mostly brought in from # QUICHE, thus new unit tests for them are deliberately omitted in Envoy tree. These -# tests are added to @com_googlesource_quiche//:quic_platform_api_test. And all tests -# under @com_googlesource_quiche// are configured in test/coverage/gen_build.sh to run in +# tests are added to @com_github_google_quiche//:quic_platform_api_test. And all tests +# under @com_github_google_quiche// are configured in test/coverage/gen_build.sh to run in # CI. # For some APIs which are not covered in QUICHE tests, their tests is added into # //test/common/quic/platform/. 
@@ -44,8 +44,8 @@ envoy_cc_library( visibility = ["//visibility:public"], deps = [ "//source/common/common:assert_lib", - "@com_googlesource_quiche//:quic_core_flags_list_lib", - "@com_googlesource_quiche//:quic_core_protocol_flags_list_lib", + "@com_github_google_quiche//:quic_core_flags_list_lib", + "@com_github_google_quiche//:quic_core_protocol_flags_list_lib", ], ) @@ -140,11 +140,11 @@ envoy_cc_library( "//source/common/buffer:buffer_lib", "//source/common/common:assert_lib", "//source/server:backtrace_lib", + "@com_github_google_quiche//:quic_core_buffer_allocator_lib", + "@com_github_google_quiche//:quic_platform_export", + "@com_github_google_quiche//:quic_platform_ip_address_family", + "@com_github_google_quiche//:quiche_common_platform", "@com_google_absl//absl/container:btree", - "@com_googlesource_quiche//:quic_core_buffer_allocator_lib", - "@com_googlesource_quiche//:quic_platform_export", - "@com_googlesource_quiche//:quic_platform_ip_address_family", - "@com_googlesource_quiche//:quiche_common_platform", ], ) @@ -175,43 +175,6 @@ envoy_cc_library( ], ) -envoy_cc_library( - name = "quic_platform_mem_slice_span_impl_lib", - srcs = ["quic_mem_slice_span_impl.cc"], - hdrs = ["quic_mem_slice_span_impl.h"], - copts = select({ - "//bazel:windows_x86_64": [], - "//conditions:default": ["-Wno-unused-parameter"], - }), - tags = ["nofips"], - visibility = ["//visibility:public"], - deps = [ - "//envoy/buffer:buffer_interface", - "@com_googlesource_quiche//:quic_core_types_lib", - "@com_googlesource_quiche//:quic_platform_base", - ], -) - -envoy_cc_library( - name = "quic_platform_mem_slice_storage_impl_lib", - srcs = ["quic_mem_slice_storage_impl.cc"], - hdrs = ["quic_mem_slice_storage_impl.h"], - copts = select({ - "//bazel:windows_x86_64": [], - "//conditions:default": [ - "-Wno-error=invalid-offsetof", - "-Wno-unused-parameter", - ], - }), - tags = ["nofips"], - visibility = ["//visibility:public"], - deps = [ - 
"@com_googlesource_quiche//:quic_core_buffer_allocator_lib", - "@com_googlesource_quiche//:quic_core_utils_lib", - "@com_googlesource_quiche//:quic_platform_mem_slice_span", - ], -) - envoy_cc_library( name = "quic_platform_udp_socket_impl_lib", hdrs = select({ @@ -231,7 +194,7 @@ envoy_cc_library( visibility = ["//visibility:public"], deps = [ "//envoy/event:dispatcher_interface", - "@com_googlesource_quiche//:quic_core_clock_lib", + "@com_github_google_quiche//:quic_core_clock_lib", ], ) @@ -273,13 +236,6 @@ envoy_cc_library( ":quiche_flags_impl_lib", ":string_utils_lib", "//source/common/common:assert_lib", - "@com_googlesource_quiche//:quiche_common_lib", + "@com_github_google_quiche//:quiche_common_lib", ], ) - -envoy_cc_library( - name = "quiche_common_platform_export_impl_lib", - hdrs = ["quiche_export_impl.h"], - tags = ["nofips"], - visibility = ["//visibility:public"], -) diff --git a/source/common/quic/platform/quic_mem_slice_span_impl.cc b/source/common/quic/platform/quic_mem_slice_span_impl.cc deleted file mode 100644 index 89a7357d37437..0000000000000 --- a/source/common/quic/platform/quic_mem_slice_span_impl.cc +++ /dev/null @@ -1,20 +0,0 @@ -// NOLINT(namespace-envoy) - -// This file is part of the QUICHE platform implementation, and is not to be -// consumed or referenced directly by other Envoy code. It serves purely as a -// porting layer for QUICHE. 
- -#include "source/common/quic/platform/quic_mem_slice_span_impl.h" - -#include "quiche/quic/platform/api/quic_mem_slice.h" - -namespace quic { - -// NOLINTNEXTLINE(readability-identifier-naming) -absl::string_view QuicMemSliceSpanImpl::GetData(size_t index) { - Envoy::Buffer::RawSliceVector slices = buffer_->getRawSlices(/*max_slices=*/index + 1); - ASSERT(slices.size() > index); - return {reinterpret_cast(slices[index].mem_), slices[index].len_}; -} - -} // namespace quic diff --git a/source/common/quic/platform/quic_mem_slice_span_impl.h b/source/common/quic/platform/quic_mem_slice_span_impl.h deleted file mode 100644 index d1e9855551282..0000000000000 --- a/source/common/quic/platform/quic_mem_slice_span_impl.h +++ /dev/null @@ -1,94 +0,0 @@ -#pragma once - -// NOLINT(namespace-envoy) - -// This file is part of the QUICHE platform implementation, and is not to be -// consumed or referenced directly by other Envoy code. It serves purely as a -// porting layer for QUICHE. - -#include "envoy/buffer/buffer.h" - -#include "absl/container/fixed_array.h" -#include "absl/strings/string_view.h" -#include "quiche/quic/core/quic_types.h" -#include "quiche/quic/platform/api/quic_mem_slice.h" - -namespace quic { - -// Implements the interface required by -// https://quiche.googlesource.com/quiche/+/refs/heads/master/quic/platform/api/quic_mem_slice_span.h -// Wraps a Buffer::Instance and deliver its data with minimum number of copies. -class QuicMemSliceSpanImpl { -public: - QuicMemSliceSpanImpl() = default; - /** - * @param buffer has to outlive the life time of this class. 
- */ - explicit QuicMemSliceSpanImpl(Envoy::Buffer::Instance& buffer) : buffer_(&buffer) {} - explicit QuicMemSliceSpanImpl(QuicMemSliceImpl* slice) - : buffer_(&slice->getSingleSliceBuffer()), mem_slice_(slice) {} - - QuicMemSliceSpanImpl(const QuicMemSliceSpanImpl& other) = default; - QuicMemSliceSpanImpl& operator=(const QuicMemSliceSpanImpl& other) = default; - - QuicMemSliceSpanImpl(QuicMemSliceSpanImpl&& other) noexcept - : buffer_(other.buffer_), mem_slice_(other.mem_slice_) { - other.buffer_ = nullptr; - other.mem_slice_ = nullptr; - } - - QuicMemSliceSpanImpl& operator=(QuicMemSliceSpanImpl&& other) noexcept { - if (this != &other) { - buffer_ = other.buffer_; - mem_slice_ = other.mem_slice_; - other.buffer_ = nullptr; - other.mem_slice_ = nullptr; - } - return *this; - } - - // QuicMemSliceSpan - // NOLINTNEXTLINE(readability-identifier-naming) - absl::string_view GetData(size_t index); - // NOLINTNEXTLINE(readability-identifier-naming) - QuicByteCount total_length() { return buffer_->length(); }; - // NOLINTNEXTLINE(readability-identifier-naming) - size_t NumSlices() { return buffer_->getRawSlices().size(); } - // NOLINTNEXTLINE(readability-identifier-naming) - template QuicByteCount ConsumeAll(ConsumeFunction consume); - bool empty() const { return buffer_->length() == 0; } - -private: - // If constructed with a QuicMemSlice, mem_slice_ point to that object and this points to - // mem_slice_->getSingleSliceBuffer(). If constructed with an Envoy buffer, this points to the - // buffer itself. - Envoy::Buffer::Instance* buffer_{nullptr}; - // If this span is not constructed with a QuicMemSlice, this points to nullptr. 
- QuicMemSliceImpl* mem_slice_{nullptr}; -}; - -template -// NOLINTNEXTLINE(readability-identifier-naming) -QuicByteCount QuicMemSliceSpanImpl::ConsumeAll(ConsumeFunction consume) { - size_t saved_length = 0; - if (mem_slice_ == nullptr) { - for (auto& slice : buffer_->getRawSlices()) { - if (slice.len_ == 0) { - continue; - } - // Move each slice into a stand-alone buffer. - // TODO(danzh): investigate the cost of allocating one buffer per slice. - // If it turns out to be expensive, add a new function to free data in the middle in buffer - // interface and re-design QuicMemSliceImpl. - consume(QuicMemSlice(QuicMemSliceImpl(*buffer_, slice.len_))); - saved_length += slice.len_; - } - } else { - saved_length += mem_slice_->length(); - consume(quic::QuicMemSlice(std::move(*mem_slice_))); - } - ASSERT(buffer_->length() == 0); - return saved_length; -} - -} // namespace quic diff --git a/source/common/quic/platform/quic_mem_slice_storage_impl.cc b/source/common/quic/platform/quic_mem_slice_storage_impl.cc deleted file mode 100644 index 0f90210eb992b..0000000000000 --- a/source/common/quic/platform/quic_mem_slice_storage_impl.cc +++ /dev/null @@ -1,44 +0,0 @@ -// NOLINT(namespace-envoy) - -// This file is part of the QUICHE platform implementation, and is not to be -// consumed or referenced directly by other Envoy code. It serves purely as a -// porting layer for QUICHE. - -#include "source/common/quic/platform/quic_mem_slice_storage_impl.h" - -#include - -#include "envoy/buffer/buffer.h" - -#include "quiche/quic/core/quic_utils.h" - -namespace quic { - -// TODO(danzh)Note that |allocator| is not used to allocate memory currently, instead, -// Buffer::OwnedImpl allocates memory on its own. Investigate if a customized -// QuicBufferAllocator can improve cache hit. 
-QuicMemSliceStorageImpl::QuicMemSliceStorageImpl(const iovec* iov, int iov_count, - QuicBufferAllocator* /*allocator*/, - const QuicByteCount max_slice_len) { - if (iov == nullptr) { - return; - } - QuicByteCount write_len = 0; - for (int i = 0; i < iov_count; ++i) { - write_len += iov[i].iov_len; - } - size_t io_offset = 0; - while (io_offset < write_len) { - size_t slice_len = std::min(write_len - io_offset, max_slice_len); - - // Use a separate slice so that we do not violate the restriction of |max_slice_len| when - // ToSpan() is called. - auto reservation = buffer_.reserveSingleSlice(slice_len, true); - QuicUtils::CopyToBuffer(iov, iov_count, io_offset, slice_len, - static_cast(reservation.slice().mem_)); - io_offset += slice_len; - reservation.commit(slice_len); - } -} - -} // namespace quic diff --git a/source/common/quic/platform/quic_mem_slice_storage_impl.h b/source/common/quic/platform/quic_mem_slice_storage_impl.h deleted file mode 100644 index 797ade760686e..0000000000000 --- a/source/common/quic/platform/quic_mem_slice_storage_impl.h +++ /dev/null @@ -1,47 +0,0 @@ -#pragma once - -// NOLINT(namespace-envoy) - -// This file is part of the QUICHE platform implementation, and is not to be -// consumed or referenced directly by other Envoy code. It serves purely as a -// porting layer for QUICHE. - -#include "source/common/buffer/buffer_impl.h" - -#include "quiche/quic/core/quic_buffer_allocator.h" -#include "quiche/quic/platform/api/quic_iovec.h" -#include "quiche/quic/platform/api/quic_mem_slice_span.h" - -namespace quic { - -// QuicMemSliceStorageImpl wraps a MemSlice vector. 
-class QuicMemSliceStorageImpl { -public: - QuicMemSliceStorageImpl(const iovec* iov, int iov_count, QuicBufferAllocator* allocator, - const QuicByteCount max_slice_len); - - QuicMemSliceStorageImpl(const QuicMemSliceStorageImpl& other) { buffer_.add(other.buffer_); } - - QuicMemSliceStorageImpl& operator=(const QuicMemSliceStorageImpl& other) { - if (this != &other) { - if (buffer_.length() > 0) { - buffer_.drain(buffer_.length()); - } - buffer_.add(other.buffer_); - } - return *this; - } - QuicMemSliceStorageImpl(QuicMemSliceStorageImpl&& other) = default; - QuicMemSliceStorageImpl& operator=(QuicMemSliceStorageImpl&& other) = default; - - // NOLINTNEXTLINE(readability-identifier-naming) - QuicMemSliceSpan ToSpan() { return QuicMemSliceSpan(QuicMemSliceSpanImpl(buffer_)); } - - // NOLINTNEXTLINE(readability-identifier-naming) - void Append(QuicMemSliceImpl mem_slice) { buffer_.move(mem_slice.getSingleSliceBuffer()); } - -private: - Envoy::Buffer::OwnedImpl buffer_; -}; - -} // namespace quic diff --git a/source/common/quic/platform/quiche_flags_impl.cc b/source/common/quic/platform/quiche_flags_impl.cc index a251af757b085..6dcd07e3f0815 100644 --- a/source/common/quic/platform/quiche_flags_impl.cc +++ b/source/common/quic/platform/quiche_flags_impl.cc @@ -32,6 +32,9 @@ absl::flat_hash_map makeFlagMap() { QUIC_FLAG(FLAGS_quic_restart_flag_http2_testonly_default_false, false) QUIC_FLAG(FLAGS_quic_restart_flag_http2_testonly_default_true, true) #undef QUIC_FLAG + // Disable IETF draft 29 implementation. Envoy only supports RFC-v1. + FLAGS_quic_reloadable_flag_quic_disable_version_draft_29->setValue(true); + FLAGS_quic_reloadable_flag_quic_decline_server_push_stream->setValue(true); #define QUIC_PROTOCOL_FLAG(type, flag, ...) 
flags.emplace(FLAGS_##flag->name(), FLAGS_##flag); #include "quiche/quic/core/quic_protocol_flags_list.h" diff --git a/source/common/quic/quic_filter_manager_connection_impl.cc b/source/common/quic/quic_filter_manager_connection_impl.cc index 1532078e2e50d..a2049252c60db 100644 --- a/source/common/quic/quic_filter_manager_connection_impl.cc +++ b/source/common/quic/quic_filter_manager_connection_impl.cc @@ -14,7 +14,7 @@ QuicFilterManagerConnectionImpl::QuicFilterManagerConnectionImpl( : Network::ConnectionImplBase(dispatcher, /*id=*/connection_id.Hash()), network_connection_(&connection), filter_manager_(*this, *connection.connectionSocket()), stream_info_(dispatcher.timeSource(), - connection.connectionSocket()->addressProviderSharedPtr()), + connection.connectionSocket()->connectionInfoProviderSharedPtr()), write_buffer_watermark_simulation_( send_buffer_limit / 2, send_buffer_limit, [this]() { onSendBufferLowWatermark(); }, [this]() { onSendBufferHighWatermark(); }, ENVOY_LOGGER()) { @@ -175,27 +175,11 @@ void QuicFilterManagerConnectionImpl::onConnectionCloseEvent( // The connection was closed before it could be used. Stats are not recorded. 
return; } - switch (version.transport_version) { - case quic::QUIC_VERSION_43: - codec_stats_->quic_version_43_.inc(); - return; - case quic::QUIC_VERSION_46: - codec_stats_->quic_version_46_.inc(); - return; - case quic::QUIC_VERSION_50: - codec_stats_->quic_version_50_.inc(); - return; - case quic::QUIC_VERSION_51: - codec_stats_->quic_version_51_.inc(); - return; - case quic::QUIC_VERSION_IETF_DRAFT_29: - codec_stats_->quic_version_h3_29_.inc(); - return; - case quic::QUIC_VERSION_IETF_RFC_V1: + if (version.transport_version == quic::QUIC_VERSION_IETF_RFC_V1) { codec_stats_->quic_version_rfc_v1_.inc(); - return; - default: - return; + } else { + ENVOY_BUG(false, fmt::format("Unexpected QUIC version {}", + quic::QuicVersionToString(version.transport_version))); } } diff --git a/source/common/quic/quic_filter_manager_connection_impl.h b/source/common/quic/quic_filter_manager_connection_impl.h index e1a96ea42b90a..6fabaa5ded3ce 100644 --- a/source/common/quic/quic_filter_manager_connection_impl.h +++ b/source/common/quic/quic_filter_manager_connection_impl.h @@ -41,8 +41,6 @@ class QuicFilterManagerConnectionImpl : public Network::ConnectionImplBase, QuicFilterManagerConnectionImpl(QuicNetworkConnection& connection, const quic::QuicConnectionId& connection_id, Event::Dispatcher& dispatcher, uint32_t send_buffer_limit); - ~QuicFilterManagerConnectionImpl() override = default; - // Network::FilterManager // Overridden to delegate calls to filter_manager_. 
void addWriteFilter(Network::WriteFilterSharedPtr filter) override; @@ -70,11 +68,11 @@ class QuicFilterManagerConnectionImpl : public Network::ConnectionImplBase, void readDisable(bool /*disable*/) override { ASSERT(false); } void detectEarlyCloseWhenReadDisabled(bool /*value*/) override { ASSERT(false); } bool readEnabled() const override { return true; } - const Network::SocketAddressSetter& addressProvider() const override { - return network_connection_->connectionSocket()->addressProvider(); + const Network::ConnectionInfoSetter& connectionInfoProvider() const override { + return network_connection_->connectionSocket()->connectionInfoProvider(); } - Network::SocketAddressProviderSharedPtr addressProviderSharedPtr() const override { - return network_connection_->connectionSocket()->addressProviderSharedPtr(); + Network::ConnectionInfoProviderSharedPtr connectionInfoProviderSharedPtr() const override { + return network_connection_->connectionSocket()->connectionInfoProviderSharedPtr(); } absl::optional unixSocketPeerCredentials() const override { diff --git a/source/common/quic/send_buffer_monitor.cc b/source/common/quic/send_buffer_monitor.cc index 5a9426db96702..b949f0a897821 100644 --- a/source/common/quic/send_buffer_monitor.cc +++ b/source/common/quic/send_buffer_monitor.cc @@ -7,20 +7,11 @@ SendBufferMonitor::ScopedWatermarkBufferUpdater::ScopedWatermarkBufferUpdater( quic::QuicStream* quic_stream, SendBufferMonitor* send_buffer_monitor) : quic_stream_(quic_stream), old_buffered_bytes_(quic_stream_->BufferedDataBytes()), send_buffer_monitor_(send_buffer_monitor) { - if (!send_buffer_monitor_->is_doing_watermark_accounting_) { - send_buffer_monitor_->is_doing_watermark_accounting_ = true; - count_bytes_ = true; - } else { - ASSERT(static_cast(quic_stream) != static_cast(send_buffer_monitor)); - count_bytes_ = false; - } + ASSERT(!send_buffer_monitor_->is_doing_watermark_accounting_); + send_buffer_monitor_->is_doing_watermark_accounting_ = true; } 
SendBufferMonitor::ScopedWatermarkBufferUpdater::~ScopedWatermarkBufferUpdater() { - if (!count_bytes_) { - ASSERT(static_cast(quic_stream_) != static_cast(send_buffer_monitor_)); - return; - } // If quic_stream_ is done writing, regards all buffered bytes, if there is any, as drained. uint64_t new_buffered_bytes = quic_stream_->write_side_closed() ? 0u : quic_stream_->BufferedDataBytes(); diff --git a/source/common/quic/send_buffer_monitor.h b/source/common/quic/send_buffer_monitor.h index ea9d99db41faf..db2211bee0397 100644 --- a/source/common/quic/send_buffer_monitor.h +++ b/source/common/quic/send_buffer_monitor.h @@ -32,7 +32,6 @@ class SendBufferMonitor { ~ScopedWatermarkBufferUpdater(); private: - bool count_bytes_{false}; quic::QuicStream* quic_stream_{nullptr}; uint64_t old_buffered_bytes_{0}; SendBufferMonitor* send_buffer_monitor_{nullptr}; diff --git a/source/common/router/BUILD b/source/common/router/BUILD index 524bd27213f19..ffea1d8aa65e4 100644 --- a/source/common/router/BUILD +++ b/source/common/router/BUILD @@ -151,7 +151,6 @@ envoy_cc_library( "//source/common/init:target_lib", "//source/common/protobuf:utility_lib", "//source/common/router:route_config_update_impl_lib", - "@envoy_api//envoy/api/v2/route:pkg_cc_proto", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", "@envoy_api//envoy/service/discovery/v3:pkg_cc_proto", @@ -181,7 +180,6 @@ envoy_cc_library( "//source/common/config:subscription_base_interface", "//source/common/config:subscription_factory_lib", "//source/common/config:utility_lib", - "//source/common/config:version_converter_lib", "//source/common/init:manager_lib", "//source/common/init:target_lib", "//source/common/init:watcher_lib", @@ -189,7 +187,6 @@ envoy_cc_library( "//source/common/router:route_config_update_impl_lib", "//source/common/router:vhds_lib", "@envoy_api//envoy/admin/v3:pkg_cc_proto", - "@envoy_api//envoy/api/v2:pkg_cc_proto", 
"@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", @@ -232,12 +229,10 @@ envoy_cc_library( "//source/common/config:config_provider_lib", "//source/common/config:resource_name_lib", "//source/common/config:subscription_base_interface", - "//source/common/config:version_converter_lib", "//source/common/config:xds_resource_lib", "//source/common/init:manager_lib", "//source/common/init:watcher_lib", "@envoy_api//envoy/admin/v3:pkg_cc_proto", - "@envoy_api//envoy/api/v2:pkg_cc_proto", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", diff --git a/source/common/router/config_impl.cc b/source/common/router/config_impl.cc index 79ffc9aa09bb6..c3dd32519f74e 100644 --- a/source/common/router/config_impl.cc +++ b/source/common/router/config_impl.cc @@ -59,6 +59,16 @@ void mergeTransforms(Http::HeaderTransforms& dest, const Http::HeaderTransforms& src.headers_to_remove.end()); } +const envoy::config::route::v3::WeightedCluster::ClusterWeight& validateWeightedClusterSpecifier( + const envoy::config::route::v3::WeightedCluster::ClusterWeight& cluster) { + if (!cluster.name().empty() && !cluster.cluster_header().empty()) { + throw EnvoyException("Only one of name or cluster_header can be specified"); + } else if (cluster.name().empty() && cluster.cluster_header().empty()) { + throw EnvoyException("At least one of name or cluster_header need to be specified"); + } + return cluster; +} + } // namespace const std::string& OriginalConnectPort::key() { @@ -183,7 +193,7 @@ InternalRedirectPolicyImpl::InternalRedirectPolicyImpl( auto& factory = Envoy::Config::Utility::getAndCheckFactory(predicate); auto config = factory.createEmptyConfigProto(); - Envoy::Config::Utility::translateOpaqueConfig(predicate.typed_config(), 
{}, validator, *config); + Envoy::Config::Utility::translateOpaqueConfig(predicate.typed_config(), validator, *config); predicate_factories_.emplace_back(&factory, std::move(config)); } } @@ -218,12 +228,11 @@ CorsPolicyImpl::CorsPolicyImpl(const envoy::config::route::v3::CorsPolicy& confi Runtime::Loader& loader) : config_(config), loader_(loader), allow_methods_(config.allow_methods()), allow_headers_(config.allow_headers()), expose_headers_(config.expose_headers()), - max_age_(config.max_age()), - legacy_enabled_(config.has_hidden_envoy_deprecated_enabled() - ? config.hidden_envoy_deprecated_enabled().value() - : true) { + max_age_(config.max_age()) { for (const auto& string_match : config.allow_origin_string_match()) { - allow_origins_.push_back(std::make_unique(string_match)); + allow_origins_.push_back( + std::make_unique>( + string_match)); } if (config.has_allow_credentials()) { allow_credentials_ = PROTOBUF_GET_WRAPPED_REQUIRED(config, allow_credentials); @@ -237,9 +246,6 @@ ShadowPolicyImpl::ShadowPolicyImpl(const RequestMirrorPolicy& config) { if (config.has_runtime_fraction()) { runtime_key_ = config.runtime_fraction().runtime_key(); default_value_ = config.runtime_fraction().default_value(); - } else { - runtime_key_ = config.hidden_envoy_deprecated_runtime_key(); - default_value_.set_numerator(0); } trace_sampled_ = PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, trace_sampled, true); } @@ -357,15 +363,17 @@ RouteEntryImplBase::RouteEntryImplBase(const VirtualHostImpl& vhost, retry_shadow_buffer_limit_(PROTOBUF_GET_WRAPPED_OR_DEFAULT( route, per_request_buffer_limit_bytes, vhost.retryShadowBufferLimit())), metadata_(route.metadata()), typed_metadata_(route.metadata()), - match_grpc_(route.match().has_grpc()), opaque_config_(parseOpaqueConfig(route)), - decorator_(parseDecorator(route)), route_tracing_(parseRouteTracing(route)), + match_grpc_(route.match().has_grpc()), + dynamic_metadata_(route.match().dynamic_metadata().begin(), + 
route.match().dynamic_metadata().end()), + opaque_config_(parseOpaqueConfig(route)), decorator_(parseDecorator(route)), + route_tracing_(parseRouteTracing(route)), direct_response_code_(ConfigUtility::parseDirectResponseCode(route)), direct_response_body_(ConfigUtility::parseDirectResponseBody( route, factory_context.api(), vhost_.globalRouteConfig().maxDirectResponseBodySizeBytes())), - per_filter_configs_(route.typed_per_filter_config(), - route.hidden_envoy_deprecated_per_filter_config(), optional_http_filters, - factory_context, validator), + per_filter_configs_(route.typed_per_filter_config(), optional_http_filters, factory_context, + validator), route_name_(route.name()), time_source_(factory_context.dispatcher().timeSource()) { if (route.route().has_metadata_match()) { const auto filter_it = route.route().metadata_match().filter_metadata().find( @@ -376,23 +384,10 @@ RouteEntryImplBase::RouteEntryImplBase(const VirtualHostImpl& vhost, } if (!route.route().request_mirror_policies().empty()) { - if (route.route().has_hidden_envoy_deprecated_request_mirror_policy()) { - // protobuf does not allow `oneof` to contain a field labeled `repeated`, so we do our own - // xor-like check. - // https://github.com/protocolbuffers/protobuf/issues/2592 - // The alternative solution suggested (wrapping the oneof in a repeated message) would still - // break wire compatibility. 
- // (see https://github.com/envoyproxy/envoy/issues/439#issuecomment-383622723) - throw EnvoyException("Cannot specify both request_mirror_policy and request_mirror_policies"); - } for (const auto& mirror_policy_config : route.route().request_mirror_policies()) { shadow_policies_.push_back(std::make_unique(mirror_policy_config)); } - } else if (route.route().has_hidden_envoy_deprecated_request_mirror_policy()) { - shadow_policies_.push_back(std::make_unique( - route.route().hidden_envoy_deprecated_request_mirror_policy())); } - // If this is a weighted_cluster, we create N internal route entries // (called WeightedClusterEntry), such that each object is a simple // single cluster, pointing back to the parent. Metadata criteria @@ -502,14 +497,16 @@ bool RouteEntryImplBase::evaluateTlsContextMatch(const StreamInfo::StreamInfo& s const TlsContextMatchCriteria& criteria = *tlsContextMatchCriteria(); if (criteria.presented().has_value()) { - const bool peer_presented = stream_info.downstreamSslConnection() && - stream_info.downstreamSslConnection()->peerCertificatePresented(); + const bool peer_presented = + stream_info.downstreamAddressProvider().sslConnection() && + stream_info.downstreamAddressProvider().sslConnection()->peerCertificatePresented(); matches &= criteria.presented().value() == peer_presented; } if (criteria.validated().has_value()) { - const bool peer_validated = stream_info.downstreamSslConnection() && - stream_info.downstreamSslConnection()->peerCertificateValidated(); + const bool peer_validated = + stream_info.downstreamAddressProvider().sslConnection() && + stream_info.downstreamAddressProvider().sslConnection()->peerCertificateValidated(); matches &= criteria.validated().value() == peer_validated; } @@ -529,17 +526,34 @@ bool RouteEntryImplBase::matchRoute(const Http::RequestHeaderMap& headers, if (match_grpc_) { matches &= Grpc::Common::isGrpcRequestHeaders(headers); + if (!matches) { + return false; + } } matches &= 
Http::HeaderUtility::matchHeaders(headers, config_headers_); + if (!matches) { + return false; + } if (!config_query_parameters_.empty()) { Http::Utility::QueryParams query_parameters = Http::Utility::parseQueryString(headers.getPathValue()); matches &= ConfigUtility::matchQueryParams(query_parameters, config_query_parameters_); + if (!matches) { + return false; + } } matches &= evaluateTlsContextMatch(stream_info); + for (const auto& m : dynamic_metadata_) { + if (!matches) { + // No need to check anymore as all dynamic metadata matchers must match for a match to occur. + break; + } + matches &= m.match(stream_info.dynamicMetadata()); + } + return matches; } @@ -1005,9 +1019,18 @@ void RouteEntryImplBase::validateClusters( } } -const RouteSpecificFilterConfig* -RouteEntryImplBase::perFilterConfig(const std::string& name) const { - return per_filter_configs_.get(name); +void RouteEntryImplBase::traversePerFilterConfig( + const std::string& filter_name, + std::function cb) const { + auto maybe_vhost_config = vhost_.perFilterConfig(filter_name); + if (maybe_vhost_config != nullptr) { + cb(*maybe_vhost_config); + } + + auto maybe_route_config = per_filter_configs_.get(filter_name); + if (maybe_route_config != nullptr) { + cb(*maybe_route_config); + } } RouteEntryImplBase::WeightedClusterEntry::WeightedClusterEntry( @@ -1016,17 +1039,17 @@ RouteEntryImplBase::WeightedClusterEntry::WeightedClusterEntry( ProtobufMessage::ValidationVisitor& validator, const envoy::config::route::v3::WeightedCluster::ClusterWeight& cluster, const OptionalHttpFilters& optional_http_filters) - : DynamicRouteEntry(parent, cluster.name()), runtime_key_(runtime_key), - loader_(factory_context.runtime()), + : DynamicRouteEntry(parent, validateWeightedClusterSpecifier(cluster).name()), + runtime_key_(runtime_key), loader_(factory_context.runtime()), cluster_weight_(PROTOBUF_GET_WRAPPED_REQUIRED(cluster, weight)), request_headers_parser_(HeaderParser::configure(cluster.request_headers_to_add(), 
cluster.request_headers_to_remove())), response_headers_parser_(HeaderParser::configure(cluster.response_headers_to_add(), cluster.response_headers_to_remove())), - per_filter_configs_(cluster.typed_per_filter_config(), - cluster.hidden_envoy_deprecated_per_filter_config(), - optional_http_filters, factory_context, validator), - host_rewrite_(cluster.host_rewrite_literal()) { + per_filter_configs_(cluster.typed_per_filter_config(), optional_http_filters, factory_context, + validator), + host_rewrite_(cluster.host_rewrite_literal()), + cluster_header_name_(cluster.cluster_header()) { if (cluster.has_metadata_match()) { const auto filter_it = cluster.metadata_match().filter_metadata().find( Envoy::Config::MetadataFilters::get().ENVOY_LB); @@ -1050,10 +1073,15 @@ Http::HeaderTransforms RouteEntryImplBase::WeightedClusterEntry::responseHeaderT return transforms; } -const RouteSpecificFilterConfig* -RouteEntryImplBase::WeightedClusterEntry::perFilterConfig(const std::string& name) const { - const auto cfg = per_filter_configs_.get(name); - return cfg != nullptr ? 
cfg : DynamicRouteEntry::perFilterConfig(name); +void RouteEntryImplBase::WeightedClusterEntry::traversePerFilterConfig( + const std::string& filter_name, + std::function cb) const { + DynamicRouteEntry::traversePerFilterConfig(filter_name, cb); + + const auto* cfg = per_filter_configs_.get(filter_name); + if (cfg) { + cb(*cfg); + } } PrefixRouteEntryImpl::PrefixRouteEntryImpl( @@ -1199,16 +1227,14 @@ VirtualHostImpl::VirtualHostImpl( virtual_host.request_headers_to_remove())), response_headers_parser_(HeaderParser::configure(virtual_host.response_headers_to_add(), virtual_host.response_headers_to_remove())), - per_filter_configs_(virtual_host.typed_per_filter_config(), - virtual_host.hidden_envoy_deprecated_per_filter_config(), - optional_http_filters, factory_context, validator), + per_filter_configs_(virtual_host.typed_per_filter_config(), optional_http_filters, + factory_context, validator), retry_shadow_buffer_limit_(PROTOBUF_GET_WRAPPED_OR_DEFAULT( virtual_host, per_request_buffer_limit_bytes, std::numeric_limits::max())), include_attempt_count_in_request_(virtual_host.include_request_attempt_count()), include_attempt_count_in_response_(virtual_host.include_attempt_count_in_response()), virtual_cluster_catch_all_(*vcluster_scope_, factory_context.routerContext().virtualClusterStatNames()) { - switch (virtual_host.require_tls()) { case envoy::config::route::v3::VirtualHost::NONE: ssl_requirements_ = SslRequirements::None; @@ -1465,7 +1491,6 @@ RouteConstSharedPtr RouteMatcher::route(const RouteCallback& cb, const Http::RequestHeaderMap& headers, const StreamInfo::StreamInfo& stream_info, uint64_t random_value) const { - const VirtualHostImpl* virtual_host = findVirtualHost(headers); if (virtual_host) { return virtual_host->getRouteFromEntries(cb, headers, stream_info, random_value); @@ -1475,6 +1500,10 @@ RouteConstSharedPtr RouteMatcher::route(const RouteCallback& cb, } const SslRedirector SslRedirectRoute::SSL_REDIRECTOR; +const 
envoy::config::core::v3::Metadata SslRedirectRoute::metadata_; +const Envoy::Config::TypedMetadataImpl + SslRedirectRoute::typed_metadata_({}); + const std::shared_ptr VirtualHostImpl::SSL_REDIRECT_ROUTE{ new SslRedirectRoute()}; @@ -1527,7 +1556,7 @@ RouteConstSharedPtr ConfigImpl::route(const RouteCallback& cb, RouteSpecificFilterConfigConstSharedPtr PerFilterConfigs::createRouteSpecificFilterConfig( const std::string& name, const ProtobufWkt::Any& typed_config, - const ProtobufWkt::Struct& config, const OptionalHttpFilters& optional_http_filters, + const OptionalHttpFilters& optional_http_filters, Server::Configuration::ServerFactoryContext& factory_context, ProtobufMessage::ValidationVisitor& validator) { bool is_optional = (optional_http_filters.find(name) != optional_http_filters.end()); @@ -1539,7 +1568,7 @@ RouteSpecificFilterConfigConstSharedPtr PerFilterConfigs::createRouteSpecificFil } ProtobufTypes::MessagePtr proto_config = factory->createEmptyRouteConfigProto(); - Envoy::Config::Utility::translateOpaqueConfig(typed_config, config, validator, *proto_config); + Envoy::Config::Utility::translateOpaqueConfig(typed_config, validator, *proto_config); auto object = factory->createRouteSpecificFilterConfig(*proto_config, factory_context, validator); if (object == nullptr) { if (Runtime::runtimeFeatureEnabled( @@ -1560,35 +1589,16 @@ RouteSpecificFilterConfigConstSharedPtr PerFilterConfigs::createRouteSpecificFil PerFilterConfigs::PerFilterConfigs( const Protobuf::Map& typed_configs, - const Protobuf::Map& configs, const OptionalHttpFilters& optional_http_filters, Server::Configuration::ServerFactoryContext& factory_context, ProtobufMessage::ValidationVisitor& validator) { - if (!typed_configs.empty() && !configs.empty()) { - throw EnvoyException("Only one of typed_configs or configs can be specified"); - } - for (const auto& it : typed_configs) { // TODO(zuercher): canonicalization may be removed when deprecated filter names are removed const auto& name = 
Extensions::HttpFilters::Common::FilterNameUtil::canonicalFilterName(it.first); - auto object = - createRouteSpecificFilterConfig(name, it.second, ProtobufWkt::Struct::default_instance(), - optional_http_filters, factory_context, validator); - if (object != nullptr) { - configs_[name] = std::move(object); - } - } - - for (const auto& it : configs) { - // TODO(zuercher): canonicalization may be removed when deprecated filter names are removed - const auto& name = - Extensions::HttpFilters::Common::FilterNameUtil::canonicalFilterName(it.first); - - auto object = - createRouteSpecificFilterConfig(name, ProtobufWkt::Any::default_instance(), it.second, - optional_http_filters, factory_context, validator); + auto object = createRouteSpecificFilterConfig(name, it.second, optional_http_filters, + factory_context, validator); if (object != nullptr) { configs_[name] = std::move(object); } diff --git a/source/common/router/config_impl.h b/source/common/router/config_impl.h index a6add7e18e3d3..f291fd45e7b74 100644 --- a/source/common/router/config_impl.h +++ b/source/common/router/config_impl.h @@ -77,7 +77,6 @@ using OptionalHttpFilters = absl::flat_hash_set; class PerFilterConfigs : public Logger::Loggable { public: PerFilterConfigs(const Protobuf::Map& typed_configs, - const Protobuf::Map& configs, const OptionalHttpFilters& optional_http_filters, Server::Configuration::ServerFactoryContext& factory_context, ProtobufMessage::ValidationVisitor& validator); @@ -87,7 +86,6 @@ class PerFilterConfigs : public Logger::Loggable { private: RouteSpecificFilterConfigConstSharedPtr createRouteSpecificFilterConfig(const std::string& name, const ProtobufWkt::Any& typed_config, - const ProtobufWkt::Struct& config, const OptionalHttpFilters& optional_http_filters, Server::Configuration::ServerFactoryContext& factory_context, ProtobufMessage::ValidationVisitor& validator); @@ -127,12 +125,20 @@ class SslRedirectRoute : public Route { const RouteEntry* routeEntry() const override { return 
nullptr; } const Decorator* decorator() const override { return nullptr; } const RouteTracing* tracingConfig() const override { return nullptr; } - const RouteSpecificFilterConfig* perFilterConfig(const std::string&) const override { + const RouteSpecificFilterConfig* mostSpecificPerFilterConfig(const std::string&) const override { return nullptr; } + void traversePerFilterConfig( + const std::string&, + std::function) const override {} + const envoy::config::core::v3::Metadata& metadata() const override { return metadata_; } + const Envoy::Config::TypedMetadata& typedMetadata() const override { return typed_metadata_; } private: static const SslRedirector SSL_REDIRECTOR; + static const envoy::config::core::v3::Metadata metadata_; + static const Envoy::Config::TypedMetadataImpl + typed_metadata_; }; /** @@ -157,7 +163,7 @@ class CorsPolicyImpl : public CorsPolicy { return loader_.snapshot().featureEnabled(filter_enabled.runtime_key(), filter_enabled.default_value()); } - return legacy_enabled_; + return true; }; bool shadowEnabled() const override { if (config_.has_shadow_enabled()) { @@ -177,7 +183,6 @@ class CorsPolicyImpl : public CorsPolicy { const std::string expose_headers_; const std::string max_age_; absl::optional allow_credentials_{}; - const bool legacy_enabled_; }; class ConfigImpl; @@ -207,7 +212,7 @@ class VirtualHostImpl : public VirtualHost { Stats::StatName statName() const override { return stat_name_storage_.statName(); } const RateLimitPolicy& rateLimitPolicy() const override { return rate_limit_policy_; } const Config& routeConfig() const override; - const RouteSpecificFilterConfig* perFilterConfig(const std::string&) const override; + const RouteSpecificFilterConfig* perFilterConfig(const std::string&) const; bool includeAttemptCountInRequest() const override { return include_attempt_count_in_request_; } bool includeAttemptCountInResponse() const override { return include_attempt_count_in_response_; } const absl::optional& retryPolicy() const 
{ @@ -586,7 +591,14 @@ class RouteEntryImplBase : public RouteEntry, const RouteEntry* routeEntry() const override; const Decorator* decorator() const override { return decorator_.get(); } const RouteTracing* tracingConfig() const override { return route_tracing_.get(); } - const RouteSpecificFilterConfig* perFilterConfig(const std::string&) const override; + const RouteSpecificFilterConfig* + mostSpecificPerFilterConfig(const std::string& name) const override { + auto* config = per_filter_configs_.get(name); + return config ? config : vhost_.perFilterConfig(name); + } + void traversePerFilterConfig( + const std::string& filter_name, + std::function cb) const override; protected: const bool case_sensitive_; @@ -732,9 +744,14 @@ class RouteEntryImplBase : public RouteEntry, const RouteEntry* routeEntry() const override { return this; } const Decorator* decorator() const override { return parent_->decorator(); } const RouteTracing* tracingConfig() const override { return parent_->tracingConfig(); } - - const RouteSpecificFilterConfig* perFilterConfig(const std::string& name) const override { - return parent_->perFilterConfig(name); + const RouteSpecificFilterConfig* + mostSpecificPerFilterConfig(const std::string& name) const override { + return parent_->mostSpecificPerFilterConfig(name); + } + void traversePerFilterConfig( + const std::string& filter_name, + std::function cb) const override { + parent_->traversePerFilterConfig(filter_name, cb); }; private: @@ -784,7 +801,17 @@ class RouteEntryImplBase : public RouteEntry, Http::HeaderTransforms responseHeaderTransforms(const StreamInfo::StreamInfo& stream_info, bool do_formatting = true) const override; - const RouteSpecificFilterConfig* perFilterConfig(const std::string& name) const override; + const RouteSpecificFilterConfig* + mostSpecificPerFilterConfig(const std::string& name) const override { + auto* config = per_filter_configs_.get(name); + return config ? 
config : DynamicRouteEntry::mostSpecificPerFilterConfig(name); + } + + void traversePerFilterConfig( + const std::string& filter_name, + std::function cb) const override; + + const Http::LowerCaseString& clusterHeaderName() { return cluster_header_name_; } private: const std::string runtime_key_; @@ -795,6 +822,7 @@ class RouteEntryImplBase : public RouteEntry, HeaderParserPtr response_headers_parser_; PerFilterConfigs per_filter_configs_; const std::string host_rewrite_; + const Http::LowerCaseString cluster_header_name_; }; using WeightedClusterEntrySharedPtr = std::shared_ptr; @@ -878,6 +906,7 @@ class RouteEntryImplBase : public RouteEntry, envoy::config::core::v3::Metadata metadata_; Envoy::Config::TypedMetadataImpl typed_metadata_; const bool match_grpc_; + const std::vector dynamic_metadata_; // TODO(danielhochman): refactor multimap into unordered_map since JSON is unordered map. const std::multimap opaque_config_; diff --git a/source/common/router/config_utility.cc b/source/common/router/config_utility.cc index f94d6b68a19ef..c56e4d651c41a 100644 --- a/source/common/router/config_utility.cc +++ b/source/common/router/config_utility.cc @@ -14,7 +14,7 @@ namespace Envoy { namespace Router { namespace { -absl::optional +absl::optional> maybeCreateStringMatcher(const envoy::config::route::v3::QueryParameterMatcher& config) { switch (config.query_parameter_match_specifier_case()) { case envoy::config::route::v3::QueryParameterMatcher::QueryParameterMatchSpecifierCase:: @@ -27,14 +27,7 @@ maybeCreateStringMatcher(const envoy::config::route::v3::QueryParameterMatcher& } case envoy::config::route::v3::QueryParameterMatcher::QueryParameterMatchSpecifierCase:: QUERY_PARAMETER_MATCH_SPECIFIER_NOT_SET: { - if (config.hidden_envoy_deprecated_value().empty()) { - // Present match. 
- return absl::nullopt; - } - - envoy::type::matcher::v3::StringMatcher matcher_config; - matcher_config.set_exact(config.hidden_envoy_deprecated_value()); - return Matchers::StringMatcherImpl(matcher_config); + return absl::nullopt; } } diff --git a/source/common/router/config_utility.h b/source/common/router/config_utility.h index 3c8167c2faa38..4895f103de3a7 100644 --- a/source/common/router/config_utility.h +++ b/source/common/router/config_utility.h @@ -43,7 +43,8 @@ class ConfigUtility { private: const std::string name_; - const absl::optional matcher_; + const absl::optional> + matcher_; }; using QueryParameterMatcherPtr = std::unique_ptr; diff --git a/source/common/router/delegating_route_impl.cc b/source/common/router/delegating_route_impl.cc index 935a648e9cbeb..422b43248dff6 100644 --- a/source/common/router/delegating_route_impl.cc +++ b/source/common/router/delegating_route_impl.cc @@ -14,10 +14,6 @@ const Decorator* DelegatingRoute::decorator() const { return base_route_->decora const RouteTracing* DelegatingRoute::tracingConfig() const { return base_route_->tracingConfig(); } -const RouteSpecificFilterConfig* DelegatingRoute::perFilterConfig(const std::string& name) const { - return base_route_->perFilterConfig(name); -} - // Router:DelegatingRouteEntry void DelegatingRouteEntry::finalizeResponseHeaders( Http::ResponseHeaderMap& headers, const StreamInfo::StreamInfo& stream_info) const { @@ -142,14 +138,6 @@ bool DelegatingRouteEntry::includeVirtualHostRateLimits() const { return base_route_->routeEntry()->includeVirtualHostRateLimits(); } -const Envoy::Config::TypedMetadata& DelegatingRouteEntry::typedMetadata() const { - return base_route_->routeEntry()->typedMetadata(); -} - -const envoy::config::core::v3::Metadata& DelegatingRouteEntry::metadata() const { - return base_route_->routeEntry()->metadata(); -} - const TlsContextMatchCriteria* DelegatingRouteEntry::tlsContextMatchCriteria() const { return 
base_route_->routeEntry()->tlsContextMatchCriteria(); } @@ -158,11 +146,6 @@ const PathMatchCriterion& DelegatingRouteEntry::pathMatchCriterion() const { return base_route_->routeEntry()->pathMatchCriterion(); } -const RouteSpecificFilterConfig* -DelegatingRouteEntry::perFilterConfig(const std::string& name) const { - return base_route_->routeEntry()->perFilterConfig(name); -} - bool DelegatingRouteEntry::includeAttemptCountInRequest() const { return base_route_->routeEntry()->includeAttemptCountInRequest(); } diff --git a/source/common/router/delegating_route_impl.h b/source/common/router/delegating_route_impl.h index 53721b394c53d..f9696e8fc7441 100644 --- a/source/common/router/delegating_route_impl.h +++ b/source/common/router/delegating_route_impl.h @@ -2,6 +2,8 @@ #include "envoy/router/router.h" +#include "source/common/config/metadata.h" + namespace Envoy { namespace Router { @@ -24,7 +26,23 @@ class DelegatingRoute : public Router::Route { const Router::RouteEntry* routeEntry() const override; const Router::Decorator* decorator() const override; const Router::RouteTracing* tracingConfig() const override; - const Router::RouteSpecificFilterConfig* perFilterConfig(const std::string&) const override; + + const RouteSpecificFilterConfig* + mostSpecificPerFilterConfig(const std::string& name) const override { + return base_route_->mostSpecificPerFilterConfig(name); + } + void traversePerFilterConfig( + const std::string& filter_name, + std::function cb) const override { + base_route_->traversePerFilterConfig(filter_name, cb); + } + + const envoy::config::core::v3::Metadata& metadata() const override { + return base_route_->metadata(); + } + const Envoy::Config::TypedMetadata& typedMetadata() const override { + return base_route_->typedMetadata(); + } private: const Router::RouteConstSharedPtr base_route_; @@ -80,11 +98,8 @@ class DelegatingRouteEntry : public Router::RouteEntry { const MetadataMatchCriteria* metadataMatchCriteria() const override; const 
std::multimap& opaqueConfig() const override; bool includeVirtualHostRateLimits() const override; - const Envoy::Config::TypedMetadata& typedMetadata() const override; - const envoy::config::core::v3::Metadata& metadata() const override; const TlsContextMatchCriteria* tlsContextMatchCriteria() const override; const PathMatchCriterion& pathMatchCriterion() const override; - const RouteSpecificFilterConfig* perFilterConfig(const std::string& name) const override; bool includeAttemptCountInRequest() const override; bool includeAttemptCountInResponse() const override; const UpgradeMap& upgradeMap() const override; diff --git a/source/common/router/header_formatter.cc b/source/common/router/header_formatter.cc index 80482fc17b3bb..bcc32857d5db5 100644 --- a/source/common/router/header_formatter.cc +++ b/source/common/router/header_formatter.cc @@ -224,11 +224,11 @@ parseRequestHeader(absl::string_view param) { StreamInfoHeaderFormatter::FieldExtractor sslConnectionInfoStringHeaderExtractor( std::function string_extractor) { return [string_extractor](const StreamInfo::StreamInfo& stream_info) { - if (stream_info.downstreamSslConnection() == nullptr) { + if (stream_info.downstreamAddressProvider().sslConnection() == nullptr) { return std::string(); } - return string_extractor(*stream_info.downstreamSslConnection()); + return string_extractor(*stream_info.downstreamAddressProvider().sslConnection()); }; } diff --git a/source/common/router/rds_impl.cc b/source/common/router/rds_impl.cc index 670b5e1268cc0..049e0f752fded 100644 --- a/source/common/router/rds_impl.cc +++ b/source/common/router/rds_impl.cc @@ -6,7 +6,6 @@ #include #include "envoy/admin/v3/config_dump.pb.h" -#include "envoy/api/v2/route.pb.h" #include "envoy/config/core/v3/config_source.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include "envoy/service/discovery/v3/discovery.pb.h" @@ -15,7 +14,6 @@ #include "source/common/common/fmt.h" #include 
"source/common/config/api_version.h" #include "source/common/config/utility.h" -#include "source/common/config/version_converter.h" #include "source/common/http/header_map_impl.h" #include "source/common/protobuf/utility.h" #include "source/common/router/config_impl.h" @@ -75,7 +73,6 @@ RdsRouteConfigSubscription::RdsRouteConfigSubscription( const std::string& stat_prefix, const OptionalHttpFilters& optional_http_filters, Envoy::Router::RouteConfigProviderManagerImpl& route_config_provider_manager) : Envoy::Config::SubscriptionBase( - rds.config_source().resource_api_version(), factory_context.messageValidationContext().dynamicValidationVisitor(), "name"), route_config_name_(rds.route_config_name()), scope_(factory_context.scope().createScope(stat_prefix + "rds." + route_config_name_ + ".")), @@ -141,11 +138,7 @@ void RdsRouteConfigSubscription::onConfigUpdate( route_config_name_, config_update_info_->configHash()); maybeCreateInitManager(version_info, noop_init_manager, resume_rds); vhds_subscription_ = std::make_unique( - config_update_info_, factory_context_, stat_prefix_, route_config_provider_opt_, - config_update_info_->protobufConfiguration() - .vhds() - .config_source() - .resource_api_version()); + config_update_info_, factory_context_, stat_prefix_, route_config_provider_opt_); vhds_subscription_->registerInitTargetWithInitManager( noop_init_manager == nullptr ? 
local_init_manager_ : *noop_init_manager); } @@ -400,7 +393,7 @@ RouteConfigProviderManagerImpl::dumpRouteConfigs( auto* dynamic_config = config_dump->mutable_dynamic_route_configs()->Add(); dynamic_config->set_version_info(subscription->routeConfigUpdate()->configVersion()); dynamic_config->mutable_route_config()->PackFrom( - API_RECOVER_ORIGINAL(subscription->routeConfigUpdate()->protobufConfiguration())); + subscription->routeConfigUpdate()->protobufConfiguration()); TimestampUtil::systemClockToTimestamp(subscription->routeConfigUpdate()->lastUpdated(), *dynamic_config->mutable_last_updated()); } @@ -412,8 +405,7 @@ RouteConfigProviderManagerImpl::dumpRouteConfigs( continue; } auto* static_config = config_dump->mutable_static_route_configs()->Add(); - static_config->mutable_route_config()->PackFrom( - API_RECOVER_ORIGINAL(provider->configInfo().value().config_)); + static_config->mutable_route_config()->PackFrom(provider->configInfo().value().config_); TimestampUtil::systemClockToTimestamp(provider->lastUpdated(), *static_config->mutable_last_updated()); } diff --git a/source/common/router/router.cc b/source/common/router/router.cc index 0a5bb031136bb..d1115c8a40df0 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -171,18 +171,21 @@ FilterUtility::finalTimeout(const RouteEntry& route, Http::RequestHeaderMap& req const Http::HeaderEntry* header_expected_timeout_entry = request_headers.EnvoyExpectedRequestTimeoutMs(); if (header_expected_timeout_entry) { - trySetGlobalTimeout(header_expected_timeout_entry, timeout); + trySetGlobalTimeout(*header_expected_timeout_entry, timeout); } else { const Http::HeaderEntry* header_timeout_entry = request_headers.EnvoyUpstreamRequestTimeoutMs(); - if (trySetGlobalTimeout(header_timeout_entry, timeout)) { + if (header_timeout_entry) { + trySetGlobalTimeout(*header_timeout_entry, timeout); request_headers.removeEnvoyUpstreamRequestTimeoutMs(); } } } else { const Http::HeaderEntry* 
header_timeout_entry = request_headers.EnvoyUpstreamRequestTimeoutMs(); - if (trySetGlobalTimeout(header_timeout_entry, timeout)) { + + if (header_timeout_entry) { + trySetGlobalTimeout(*header_timeout_entry, timeout); request_headers.removeEnvoyUpstreamRequestTimeoutMs(); } } @@ -226,16 +229,21 @@ FilterUtility::finalTimeout(const RouteEntry& route, Http::RequestHeaderMap& req return timeout; } -bool FilterUtility::trySetGlobalTimeout(const Http::HeaderEntry* header_timeout_entry, +absl::optional +FilterUtility::tryParseHeaderTimeout(const Http::HeaderEntry& header_timeout_entry) { + uint64_t header_timeout; + if (absl::SimpleAtoi(header_timeout_entry.value().getStringView(), &header_timeout)) { + return std::chrono::milliseconds(header_timeout); + } + return absl::nullopt; +} + +void FilterUtility::trySetGlobalTimeout(const Http::HeaderEntry& header_timeout_entry, TimeoutData& timeout) { - if (header_timeout_entry) { - uint64_t header_timeout; - if (absl::SimpleAtoi(header_timeout_entry->value().getStringView(), &header_timeout)) { - timeout.global_timeout_ = std::chrono::milliseconds(header_timeout); - } - return true; + const auto timeout_ms = tryParseHeaderTimeout(header_timeout_entry); + if (timeout_ms.has_value()) { + timeout.global_timeout_ = timeout_ms.value(); } - return false; } FilterUtility::HedgingParams @@ -579,6 +587,14 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, grpc_request_, hedging_params_.hedge_on_per_try_timeout_, config_.respect_expected_rq_timeout_); + const Http::HeaderEntry* header_max_stream_duration_entry = + headers.EnvoyUpstreamStreamDurationMs(); + if (header_max_stream_duration_entry) { + dynamic_max_stream_duration_ = + FilterUtility::tryParseHeaderTimeout(*header_max_stream_duration_entry); + headers.removeEnvoyUpstreamStreamDurationMs(); + } + // If this header is set with any value, use an alternate response code on timeout if (headers.EnvoyUpstreamRequestTimeoutAltResponse()) { 
timeout_response_code_ = Http::Code::NoContent; @@ -602,15 +618,16 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, headers.setEnvoyAttemptCount(attempt_count_); }; } + callbacks_->streamInfo().setAttemptCount(attempt_count_); // Inject the active span's tracing context into the request headers. callbacks_->activeSpan().injectContext(headers); route_entry_->finalizeRequestHeaders(headers, callbacks_->streamInfo(), !config_.suppress_envoy_headers_); - FilterUtility::setUpstreamScheme(headers, - callbacks_->streamInfo().downstreamSslConnection() != nullptr, - host->transportSocketFactory().implementsSecureTransport()); + FilterUtility::setUpstreamScheme( + headers, callbacks_->streamInfo().downstreamAddressProvider().sslConnection() != nullptr, + host->transportSocketFactory().implementsSecureTransport()); // Ensure an http transport scheme is selected before continuing with decoding. ASSERT(headers.Scheme()); @@ -1298,7 +1315,7 @@ void Filter::onUpstreamHeaders(uint64_t response_code, Http::ResponseHeaderMapPt if (route_entry_->internalRedirectPolicy().enabled() && route_entry_->internalRedirectPolicy().shouldRedirectForResponseCode( static_cast(response_code)) && - setupRedirect(*headers, upstream_request)) { + setupRedirect(*headers)) { return; // If the redirect could not be handled, fail open and let it pass to the // next downstream. @@ -1354,6 +1371,14 @@ void Filter::onUpstreamHeaders(uint64_t response_code, Http::ResponseHeaderMapPt downstream_response_started_ = true; final_upstream_request_ = &upstream_request; + // In upstream request hedging scenarios the upstream connection ID set in onPoolReady might not + // be the connection ID of the upstream connection that ended up receiving upstream headers. Thus + // reset the upstream connection ID here with the ID of the connection that ultimately was the + // transport for the final upstream request. 
+ if (final_upstream_request_->streamInfo().upstreamConnectionId().has_value()) { + callbacks_->streamInfo().setUpstreamConnectionId( + final_upstream_request_->streamInfo().upstreamConnectionId().value()); + } resetOtherUpstreams(upstream_request); if (end_stream) { onUpstreamComplete(upstream_request); @@ -1462,24 +1487,10 @@ void Filter::onUpstreamComplete(UpstreamRequest& upstream_request) { cleanup(); } -bool Filter::setupRedirect(const Http::ResponseHeaderMap& headers, - UpstreamRequest& upstream_request) { +bool Filter::setupRedirect(const Http::ResponseHeaderMap& headers) { ENVOY_STREAM_LOG(debug, "attempting internal redirect", *callbacks_); const Http::HeaderEntry* location = headers.Location(); - // If the internal redirect succeeds, callbacks_->recreateStream() will result in the - // destruction of this filter before the stream is marked as complete, and onDestroy will reset - // the stream. - // - // Normally when a stream is complete we signal this by resetting the upstream but this cannot - // be done in this case because if recreateStream fails, the "failure" path continues to call - // code in onUpstreamHeaders which requires the upstream *not* be reset. To avoid onDestroy - // performing a spurious stream reset in the case recreateStream() succeeds, we explicitly track - // stream completion here and check it in onDestroy. This is annoyingly complicated but is - // better than needlessly resetting streams. - attempting_internal_redirect_with_complete_stream_ = - upstream_request.upstreamTiming().last_upstream_rx_byte_received_ && downstream_end_stream_; - const uint64_t status_code = Http::Utility::getResponseStatus(headers); // Redirects are not supported for streaming requests yet. 
@@ -1493,8 +1504,6 @@ bool Filter::setupRedirect(const Http::ResponseHeaderMap& headers, return true; } - attempting_internal_redirect_with_complete_stream_ = false; - ENVOY_STREAM_LOG(debug, "Internal redirect failed", *callbacks_); cluster_->stats().upstream_internal_redirect_failed_total_.inc(); return false; @@ -1560,7 +1569,17 @@ bool Filter::convertRequestHeadersForInternalRedirect(Http::RequestHeaderMap& do // Replace the original host, scheme and path. downstream_headers.setScheme(absolute_url.scheme()); downstream_headers.setHost(absolute_url.hostAndPort()); - downstream_headers.setPath(absolute_url.pathAndQueryParams()); + + auto path_and_query = absolute_url.pathAndQueryParams(); + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.http_reject_path_with_fragment")) { + // Envoy treats internal redirect as a new request and will reject it if URI path + // contains #fragment. However the Location header is allowed to have #fragment in URI path. To + // prevent Envoy from rejecting internal redirect, strip the #fragment from Location URI if it + // is present. 
+ auto fragment_pos = path_and_query.find('#'); + path_and_query = path_and_query.substr(0, fragment_pos); + } + downstream_headers.setPath(path_and_query); callbacks_->clearRouteCache(); const auto route = callbacks_->route(); @@ -1605,6 +1624,7 @@ void Filter::doRetry() { is_retry_ = true; attempt_count_++; + callbacks_->streamInfo().setAttemptCount(attempt_count_); ASSERT(pending_retries_ > 0); pending_retries_--; diff --git a/source/common/router/router.h b/source/common/router/router.h index 386815c478402..9a1d057793a26 100644 --- a/source/common/router/router.h +++ b/source/common/router/router.h @@ -146,7 +146,22 @@ class FilterUtility { bool per_try_timeout_hedging_enabled, bool respect_expected_rq_timeout); - static bool trySetGlobalTimeout(const Http::HeaderEntry* header_timeout_entry, + /** + * Try to parse a header entry that may have a timeout field + * + * @param header_timeout_entry header entry which may contain a timeout value. + * @return result timeout value from header. It will return nullopt if parse failed. + */ + static absl::optional + tryParseHeaderTimeout(const Http::HeaderEntry& header_timeout_entry); + + /** + * Try to set global timeout. + * + * @param header_timeout_entry header entry which may contain a timeout value. + * @param timeout timeout data to set from header timeout entry. 
+ */ + static void trySetGlobalTimeout(const Http::HeaderEntry& header_timeout_entry, TimeoutData& timeout); /** @@ -262,6 +277,7 @@ class RouterFilterInterface { virtual Upstream::ClusterInfoConstSharedPtr cluster() PURE; virtual FilterConfig& config() PURE; virtual FilterUtility::TimeoutData timeout() PURE; + virtual absl::optional dynamicMaxStreamDuration() const PURE; virtual Http::RequestHeaderMap* downstreamHeaders() PURE; virtual Http::RequestTrailerMap* downstreamTrailers() PURE; virtual bool downstreamResponseStarted() const PURE; @@ -285,9 +301,7 @@ class Filter : Logger::Loggable, Filter(FilterConfig& config) : config_(config), final_upstream_request_(nullptr), downstream_100_continue_headers_encoded_(false), downstream_response_started_(false), - downstream_end_stream_(false), is_retry_(false), - attempting_internal_redirect_with_complete_stream_(false), - request_buffer_overflowed_(false) {} + downstream_end_stream_(false), is_retry_(false), request_buffer_overflowed_(false) {} ~Filter() override; @@ -408,8 +422,8 @@ class Filter : Logger::Loggable, std::string value; const Network::Connection* conn = downstreamConnection(); // Need to check for null conn if this is ever used by Http::AsyncClient in the future. 
- value = conn->addressProvider().remoteAddress()->asString() + - conn->addressProvider().localAddress()->asString(); + value = conn->connectionInfoProvider().remoteAddress()->asString() + + conn->connectionInfoProvider().localAddress()->asString(); const std::string cookie_value = Hex::uint64ToHex(HashUtil::xxHash64(value)); downstream_set_cookies_.emplace_back( @@ -436,6 +450,9 @@ class Filter : Logger::Loggable, Upstream::ClusterInfoConstSharedPtr cluster() override { return cluster_; } FilterConfig& config() override { return config_; } FilterUtility::TimeoutData timeout() override { return timeout_; } + absl::optional dynamicMaxStreamDuration() const override { + return dynamic_max_stream_duration_; + } Http::RequestHeaderMap* downstreamHeaders() override { return downstream_headers_; } Http::RequestTrailerMap* downstreamTrailers() override { return downstream_trailers_; } bool downstreamResponseStarted() const override { return downstream_response_started_; } @@ -497,7 +514,7 @@ class Filter : Logger::Loggable, // for the remaining upstream requests to return. void resetOtherUpstreams(UpstreamRequest& upstream_request); void sendNoHealthyUpstreamResponse(); - bool setupRedirect(const Http::ResponseHeaderMap& headers, UpstreamRequest& upstream_request); + bool setupRedirect(const Http::ResponseHeaderMap& headers); bool convertRequestHeadersForInternalRedirect(Http::RequestHeaderMap& downstream_headers, const Http::HeaderEntry& internal_redirect, uint64_t status_code); @@ -535,7 +552,8 @@ class Filter : Logger::Loggable, MetadataMatchCriteriaConstPtr metadata_match_; std::function modify_headers_; std::vector> active_shadow_policies_{}; - + // The stream lifetime configured by request header. 
+ absl::optional dynamic_max_stream_duration_; // list of cookies to add to upstream headers std::vector downstream_set_cookies_; @@ -544,7 +562,6 @@ class Filter : Logger::Loggable, bool downstream_end_stream_ : 1; bool is_retry_ : 1; bool include_attempt_count_in_request_ : 1; - bool attempting_internal_redirect_with_complete_stream_ : 1; bool request_buffer_overflowed_ : 1; bool internal_redirects_with_body_enabled_ : 1; uint32_t attempt_count_{1}; diff --git a/source/common/router/router_ratelimit.cc b/source/common/router/router_ratelimit.cc index d7fe0b081ac54..5f3eda7c74515 100644 --- a/source/common/router/router_ratelimit.cc +++ b/source/common/router/router_ratelimit.cc @@ -76,7 +76,10 @@ bool SourceClusterAction::populateDescriptor(RateLimit::DescriptorEntry& descrip bool DestinationClusterAction::populateDescriptor(RateLimit::DescriptorEntry& descriptor_entry, const std::string&, const Http::RequestHeaderMap&, const StreamInfo::StreamInfo& info) const { - descriptor_entry = {"destination_cluster", info.routeEntry()->clusterName()}; + if (info.route() == nullptr || info.route()->routeEntry() == nullptr) { + return false; + } + descriptor_entry = {"destination_cluster", info.route()->routeEntry()->clusterName()}; return true; } @@ -137,7 +140,7 @@ bool MetaDataAction::populateDescriptor(RateLimit::DescriptorEntry& descriptor_e metadata_source = &info.dynamicMetadata(); break; case envoy::config::route::v3::RateLimit::Action::MetaData::ROUTE_ENTRY: - metadata_source = &info.routeEntry()->metadata(); + metadata_source = &info.route()->metadata(); break; default: NOT_REACHED_GCOVR_EXCL_LINE; diff --git a/source/common/router/scoped_rds.cc b/source/common/router/scoped_rds.cc index 13cee6e2449e0..eac4557b0ceb8 100644 --- a/source/common/router/scoped_rds.cc +++ b/source/common/router/scoped_rds.cc @@ -3,7 +3,6 @@ #include #include "envoy/admin/v3/config_dump.pb.h" -#include "envoy/api/v2/scoped_route.pb.h" #include "envoy/config/core/v3/config_source.pb.h" 
#include "envoy/config/route/v3/scoped_route.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" @@ -15,7 +14,6 @@ #include "source/common/common/utility.h" #include "source/common/config/api_version.h" #include "source/common/config/resource_name.h" -#include "source/common/config/version_converter.h" #include "source/common/config/xds_resource.h" #include "source/common/init/manager_impl.h" #include "source/common/init/watcher_impl.h" @@ -109,7 +107,6 @@ ScopedRdsConfigSubscription::ScopedRdsConfigSubscription( : DeltaConfigSubscriptionInstance("SRDS", manager_identifier, config_provider_manager, factory_context), Envoy::Config::SubscriptionBase( - scoped_rds.scoped_rds_config_source().resource_api_version(), factory_context.messageValidationContext().dynamicValidationVisitor(), "name"), factory_context_(factory_context), name_(name), scope_(factory_context.scope().createScope(stat_prefix + "scoped_rds." + name + ".")), @@ -346,13 +343,12 @@ void ScopedRdsConfigSubscription::onConfigUpdate( // server. std::unique_ptr srds_initialization_continuation; ASSERT(localInitManager().state() > Init::Manager::State::Uninitialized); - const auto type_urls = - Envoy::Config::getAllVersionTypeUrls(); + const auto type_url = Envoy::Config::getTypeUrl(); // Pause RDS to not send a burst of RDS requests until we start all the new subscriptions. // In the case that localInitManager is uninitialized, RDS is already paused // either by Server init or LDS init. if (factory_context_.clusterManager().adsMux()) { - resume_rds = factory_context_.clusterManager().adsMux()->pause(type_urls); + resume_rds = factory_context_.clusterManager().adsMux()->pause(type_url); } // if local init manager is initialized, the parent init manager may have gone away. 
if (localInitManager().state() == Init::Manager::State::Initialized) { @@ -539,8 +535,7 @@ ScopedRoutesConfigProviderManager::dumpConfigs(const Matchers::StringMatcher& na if (!name_matcher.match(it.second->configProto().name())) { continue; } - dynamic_config->mutable_scoped_route_configs()->Add()->PackFrom( - API_RECOVER_ORIGINAL(it.second->configProto())); + dynamic_config->mutable_scoped_route_configs()->Add()->PackFrom(it.second->configProto()); } TimestampUtil::systemClockToTimestamp(subscription->lastUpdated(), *dynamic_config->mutable_last_updated()); @@ -557,8 +552,7 @@ ScopedRoutesConfigProviderManager::dumpConfigs(const Matchers::StringMatcher& na if (!name_matcher.match(config_proto->name())) { continue; } - inline_config->mutable_scoped_route_configs()->Add()->PackFrom( - API_RECOVER_ORIGINAL(*config_proto)); + inline_config->mutable_scoped_route_configs()->Add()->PackFrom(*config_proto); } TimestampUtil::systemClockToTimestamp(provider->lastUpdated(), *inline_config->mutable_last_updated()); diff --git a/source/common/router/upstream_request.cc b/source/common/router/upstream_request.cc index 03efc074b9a96..35cbfc8f28fcf 100644 --- a/source/common/router/upstream_request.cc +++ b/source/common/router/upstream_request.cc @@ -195,7 +195,7 @@ void UpstreamRequest::decodeTrailers(Http::ResponseTrailerMapPtr&& trailers) { void UpstreamRequest::dumpState(std::ostream& os, int indent_level) const { const char* spaces = spacesForLevel(indent_level); os << spaces << "UpstreamRequest " << this << "\n"; - const auto addressProvider = connection().addressProviderSharedPtr(); + const auto addressProvider = connection().connectionInfoProviderSharedPtr(); const Http::RequestHeaderMap* request_headers = parent_.downstreamHeaders(); DUMP_DETAILS(addressProvider); DUMP_DETAILS(request_headers); @@ -417,8 +417,15 @@ void UpstreamRequest::onPoolReady( stream_info_.setUpstreamLocalAddress(upstream_local_address); 
parent_.callbacks()->streamInfo().setUpstreamLocalAddress(upstream_local_address); - stream_info_.setUpstreamSslConnection(info.downstreamSslConnection()); - parent_.callbacks()->streamInfo().setUpstreamSslConnection(info.downstreamSslConnection()); + stream_info_.setUpstreamSslConnection(info.downstreamAddressProvider().sslConnection()); + parent_.callbacks()->streamInfo().setUpstreamSslConnection( + info.downstreamAddressProvider().sslConnection()); + + if (info.downstreamAddressProvider().connectionID().has_value()) { + stream_info_.setUpstreamConnectionId(info.downstreamAddressProvider().connectionID().value()); + parent_.callbacks()->streamInfo().setUpstreamConnectionId( + info.downstreamAddressProvider().connectionID().value()); + } if (parent_.downstreamEndStream()) { setupPerTryTimeout(); @@ -450,14 +457,18 @@ void UpstreamRequest::onPoolReady( paused_for_connect_ = true; } - if (upstream_host_->cluster().commonHttpProtocolOptions().has_max_stream_duration()) { - const auto max_stream_duration = std::chrono::milliseconds(DurationUtil::durationToMilliseconds( + absl::optional max_stream_duration; + if (parent_.dynamicMaxStreamDuration().has_value()) { + max_stream_duration = parent_.dynamicMaxStreamDuration().value(); + } else if (upstream_host_->cluster().commonHttpProtocolOptions().has_max_stream_duration()) { + max_stream_duration = std::chrono::milliseconds(DurationUtil::durationToMilliseconds( upstream_host_->cluster().commonHttpProtocolOptions().max_stream_duration())); - if (max_stream_duration.count()) { - max_stream_duration_timer_ = parent_.callbacks()->dispatcher().createTimer( - [this]() -> void { onStreamMaxDurationReached(); }); - max_stream_duration_timer_->enableTimer(max_stream_duration); - } + } + + if (max_stream_duration.has_value() && max_stream_duration->count()) { + max_stream_duration_timer_ = parent_.callbacks()->dispatcher().createTimer( + [this]() -> void { onStreamMaxDurationReached(); }); + 
max_stream_duration_timer_->enableTimer(*max_stream_duration); } const Http::Status status = diff --git a/source/common/router/upstream_request.h b/source/common/router/upstream_request.h index 8776089fe0434..f0b07e8bdacd5 100644 --- a/source/common/router/upstream_request.h +++ b/source/common/router/upstream_request.h @@ -119,6 +119,8 @@ class UpstreamRequest : public Logger::Loggable, } bool encodeComplete() const { return encode_complete_; } RouterFilterInterface& parent() { return parent_; } + // Exposes streamInfo for the upstream stream. + const StreamInfo::StreamInfo& streamInfo() const { return stream_info_; } private: bool shouldSendEndStream() { diff --git a/source/common/router/vhds.cc b/source/common/router/vhds.cc index b291b9af14f5d..324d952c42b6d 100644 --- a/source/common/router/vhds.cc +++ b/source/common/router/vhds.cc @@ -5,7 +5,6 @@ #include #include -#include "envoy/api/v2/route/route_components.pb.h" #include "envoy/config/core/v3/config_source.pb.h" #include "envoy/config/subscription.h" #include "envoy/service/discovery/v3/discovery.pb.h" @@ -24,10 +23,8 @@ namespace Router { VhdsSubscription::VhdsSubscription(RouteConfigUpdatePtr& config_update_info, Server::Configuration::ServerFactoryContext& factory_context, const std::string& stat_prefix, - absl::optional& route_config_provider_opt, - envoy::config::core::v3::ApiVersion resource_api_version) + absl::optional& route_config_provider_opt) : Envoy::Config::SubscriptionBase( - resource_api_version, factory_context.messageValidationContext().dynamicValidationVisitor(), "name"), config_update_info_(config_update_info), scope_(factory_context.scope().createScope(stat_prefix + "vhds." 
+ diff --git a/source/common/router/vhds.h b/source/common/router/vhds.h index 099b00ff322d9..e1d36da175afc 100644 --- a/source/common/router/vhds.h +++ b/source/common/router/vhds.h @@ -42,9 +42,7 @@ class VhdsSubscription : Envoy::Config::SubscriptionBase& route_config_providers, - const envoy::config::core::v3::ApiVersion resource_api_version = - envoy::config::core::v3::ApiVersion::AUTO); + absl::optional& route_config_providers); ~VhdsSubscription() override { init_target_.ready(); } void registerInitTargetWithInitManager(Init::Manager& m) { m.add(init_target_); } diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 11d268f22c7c4..cf0aeb7ae4807 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -59,7 +59,7 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.add_and_validate_scheme_header", "envoy.reloadable_features.allow_response_for_timeout", "envoy.reloadable_features.check_unsupported_typed_per_filter_config", - "envoy.reloadable_features.check_ocsp_policy", + "envoy.reloadable_features.conn_pool_delete_when_idle", "envoy.reloadable_features.correct_scheme_and_xfp", "envoy.reloadable_features.disable_tls_inspector_injection", "envoy.reloadable_features.dont_add_content_length_for_bodiless_requests", @@ -72,6 +72,9 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.health_check.immediate_failure_exclude_from_cluster", "envoy.reloadable_features.http2_consume_stream_refused_errors", "envoy.reloadable_features.http2_skip_encoding_empty_trailers", + "envoy.reloadable_features.http_ext_authz_do_not_skip_direct_response_and_redirect", + "envoy.reloadable_features.http_reject_path_with_fragment", + "envoy.reloadable_features.http_strip_fragment_from_path_unsafe_if_disabled", "envoy.reloadable_features.http_transport_failure_reason_in_body", "envoy.reloadable_features.improved_stream_limit_handling", 
"envoy.reloadable_features.internal_redirects_with_body", @@ -79,10 +82,8 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.listener_wildcard_match_ip_family", "envoy.reloadable_features.new_tcp_connection_pool", "envoy.reloadable_features.no_chunked_encoding_header_for_304", - "envoy.reloadable_features.prefer_quic_kernel_bpf_packet_routing", "envoy.reloadable_features.preserve_downstream_scheme", "envoy.reloadable_features.remove_forked_chromium_url", - "envoy.reloadable_features.require_ocsp_response_for_must_staple_certs", "envoy.reloadable_features.require_strict_1xx_and_204_response_headers", "envoy.reloadable_features.return_502_for_upstream_protocol_errors", "envoy.reloadable_features.send_strict_1xx_and_204_response_headers", @@ -98,6 +99,8 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.upstream_http2_flood_checks", "envoy.restart_features.use_apple_api_for_dns_lookups", "envoy.reloadable_features.header_map_correctly_coalesce_cookies", + "envoy.reloadable_features.sanitize_http_header_referer", + "envoy.reloadable_features.skip_dispatching_frames_for_closed_connection", }; // This is a section for officially sanctioned runtime features which are too @@ -111,15 +114,10 @@ constexpr const char* runtime_features[] = { constexpr const char* disabled_runtime_features[] = { // v2 is fatal-by-default. "envoy.test_only.broken_in_production.enable_deprecated_v2_api", - // Defaulting to off due to high risk. - // TODO(ggreenway): Move this to default-on during 1.20 release cycle. - "envoy.reloadable_features.conn_pool_delete_when_idle", // TODO(asraa) flip to true in a separate PR to enable the new JSON by default. "envoy.reloadable_features.remove_legacy_json", // Sentinel and test flag. "envoy.reloadable_features.test_feature_false", - // TODO(kbaichoo): Remove when this is no longer test only. 
- "envoy.test_only.per_stream_buffer_accounting", // Allows the use of ExtensionWithMatcher to wrap a HTTP filter with a match tree. "envoy.reloadable_features.experimental_matching_api", // When the runtime is flipped to true, use shared cache in getOrCreateRawAsyncClient method if diff --git a/source/common/runtime/runtime_impl.cc b/source/common/runtime/runtime_impl.cc index 85a7fc91ecec4..bf5ad44ffe5e7 100644 --- a/source/common/runtime/runtime_impl.cc +++ b/source/common/runtime/runtime_impl.cc @@ -453,8 +453,8 @@ void LoaderImpl::onRtdsReady() { RtdsSubscription::RtdsSubscription( LoaderImpl& parent, const envoy::config::bootstrap::v3::RuntimeLayer::RtdsLayer& rtds_layer, Stats::Store& store, ProtobufMessage::ValidationVisitor& validation_visitor) - : Envoy::Config::SubscriptionBase( - rtds_layer.rtds_config().resource_api_version(), validation_visitor, "name"), + : Envoy::Config::SubscriptionBase(validation_visitor, + "name"), parent_(parent), config_source_(rtds_layer.rtds_config()), store_(store), stats_scope_(store_.createScope("runtime")), resource_name_(rtds_layer.name()), init_target_("RTDS " + resource_name_, [this]() { start(); }) {} diff --git a/source/common/secret/BUILD b/source/common/secret/BUILD index 4dc783bc83e32..b7b25aedbe5be 100644 --- a/source/common/secret/BUILD +++ b/source/common/secret/BUILD @@ -19,7 +19,6 @@ envoy_cc_library( "//envoy/server:transport_socket_config_interface", "//source/common/common:assert_lib", "//source/common/common:minimal_logger_lib", - "//source/common/config:version_converter_lib", "//source/common/protobuf:utility_lib", "@envoy_api//envoy/admin/v3:pkg_cc_proto", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", @@ -63,7 +62,6 @@ envoy_cc_library( "//source/common/protobuf:utility_lib", "//source/common/ssl:certificate_validation_context_config_impl_lib", "//source/common/ssl:tls_certificate_config_impl_lib", - "@envoy_api//envoy/api/v2/auth:pkg_cc_proto", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", 
"@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto", "@envoy_api//envoy/service/discovery/v3:pkg_cc_proto", diff --git a/source/common/secret/sds_api.cc b/source/common/secret/sds_api.cc index 03dba0ccfe555..253391f44940d 100644 --- a/source/common/secret/sds_api.cc +++ b/source/common/secret/sds_api.cc @@ -1,6 +1,5 @@ #include "source/common/secret/sds_api.h" -#include "envoy/api/v2/auth/cert.pb.h" #include "envoy/config/core/v3/config_source.pb.h" #include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" #include "envoy/service/discovery/v3/discovery.pb.h" @@ -21,7 +20,7 @@ SdsApi::SdsApi(envoy::config::core::v3::ConfigSource sds_config, absl::string_vi ProtobufMessage::ValidationVisitor& validation_visitor, Stats::Store& stats, std::function destructor_cb, Event::Dispatcher& dispatcher, Api::Api& api) : Envoy::Config::SubscriptionBase( - sds_config.resource_api_version(), validation_visitor, "name"), + validation_visitor, "name"), init_target_(fmt::format("SdsApi {}", sds_config_name), [this] { initialize(); }), dispatcher_(dispatcher), api_(api), scope_(stats.createScope(absl::StrCat("sds.", sds_config_name, "."))), diff --git a/source/common/secret/sds_api.h b/source/common/secret/sds_api.h index a29cfdace2cc4..f2ed3301ab61a 100644 --- a/source/common/secret/sds_api.h +++ b/source/common/secret/sds_api.h @@ -191,7 +191,9 @@ class TlsCertificateSdsApi : public SdsApi, public TlsCertificateConfigProvider *sds_tls_certificate_secrets_); // We replace path based secrets with inlined secrets on update. 
resolveDataSource(files, *resolved_tls_certificate_secrets_->mutable_certificate_chain()); - resolveDataSource(files, *resolved_tls_certificate_secrets_->mutable_private_key()); + if (sds_tls_certificate_secrets_->has_private_key()) { + resolveDataSource(files, *resolved_tls_certificate_secrets_->mutable_private_key()); + } } void validateConfig(const envoy::extensions::transport_sockets::tls::v3::Secret&) override {} std::vector getDataSourceFilenames() override; diff --git a/source/common/secret/secret_manager_impl.cc b/source/common/secret/secret_manager_impl.cc index 5238ff77ae9e8..17f356346144a 100644 --- a/source/common/secret/secret_manager_impl.cc +++ b/source/common/secret/secret_manager_impl.cc @@ -8,7 +8,6 @@ #include "source/common/common/assert.h" #include "source/common/common/logger.h" -#include "source/common/config/version_converter.h" #include "source/common/protobuf/utility.h" #include "source/common/secret/sds_api.h" #include "source/common/secret/secret_provider_impl.h" @@ -155,11 +154,6 @@ GenericSecretConfigProviderSharedPtr SecretManagerImpl::findOrCreateGenericSecre ProtobufTypes::MessagePtr SecretManagerImpl::dumpSecretConfigs(const Matchers::StringMatcher& name_matcher) { - // TODO(htuch): unlike other config providers, we're recreating the original - // Secrets below. This makes it hard to support API_RECOVER_ORIGINAL()-style - // recovery of the original config message. As a result, for now we're - // providing v3 config dumps. For Secrets, the main deprecation of interest - // are the use of v2 Struct config() and verify_subject_alt_name. auto config_dump = std::make_unique(); // Handle static tls key/cert providers. 
for (const auto& cert_iter : static_tls_certificate_providers_) { diff --git a/source/common/ssl/tls_certificate_config_impl.cc b/source/common/ssl/tls_certificate_config_impl.cc index 1c5073d1ff1ea..fd0332c126af6 100644 --- a/source/common/ssl/tls_certificate_config_impl.cc +++ b/source/common/ssl/tls_certificate_config_impl.cc @@ -28,7 +28,7 @@ static const std::string INLINE_STRING = ""; TlsCertificateConfigImpl::TlsCertificateConfigImpl( const envoy::extensions::transport_sockets::tls::v3::TlsCertificate& config, - Server::Configuration::TransportSocketFactoryContext* factory_context, Api::Api& api) + Server::Configuration::TransportSocketFactoryContext& factory_context, Api::Api& api) : certificate_chain_(Config::DataSource::read(config.certificate_chain(), true, api)), certificate_chain_path_( Config::DataSource::getPath(config.certificate_chain()) @@ -42,19 +42,30 @@ TlsCertificateConfigImpl::TlsCertificateConfigImpl( ocsp_staple_(readOcspStaple(config.ocsp_staple(), api)), ocsp_staple_path_(Config::DataSource::getPath(config.ocsp_staple()) .value_or(ocsp_staple_.empty() ? EMPTY_STRING : INLINE_STRING)), - private_key_method_( - factory_context != nullptr && config.has_private_key_provider() - ? 
factory_context->sslContextManager() - .privateKeyMethodManager() - .createPrivateKeyMethodProvider(config.private_key_provider(), *factory_context) - : nullptr) { + private_key_method_(nullptr) { if (config.has_private_key_provider() && config.has_private_key()) { throw EnvoyException(fmt::format( "Certificate configuration can't have both private_key and private_key_provider")); } - if (certificate_chain_.empty() || (private_key_.empty() && private_key_method_ == nullptr)) { - throw EnvoyException(fmt::format("Failed to load incomplete certificate from {}, {}", - certificate_chain_path_, private_key_path_)); + if (config.has_private_key_provider()) { + private_key_method_ = + factory_context.sslContextManager() + .privateKeyMethodManager() + .createPrivateKeyMethodProvider(config.private_key_provider(), factory_context); + } + if (certificate_chain_.empty()) { + throw EnvoyException( + fmt::format("Failed to load incomplete certificate from {}: certificate chain not set", + certificate_chain_path_)); + } + if (private_key_.empty() && private_key_method_ == nullptr) { + if (config.has_private_key_provider()) { + throw EnvoyException(fmt::format("Failed to load private key provider: {}", + config.private_key_provider().provider_name())); + } else { + throw EnvoyException( + fmt::format("Failed to load incomplete private key from path: {}", private_key_path_)); + } } } diff --git a/source/common/ssl/tls_certificate_config_impl.h b/source/common/ssl/tls_certificate_config_impl.h index 088b60e393aaa..074ddcb955a4e 100644 --- a/source/common/ssl/tls_certificate_config_impl.h +++ b/source/common/ssl/tls_certificate_config_impl.h @@ -14,7 +14,7 @@ class TlsCertificateConfigImpl : public TlsCertificateConfig { public: TlsCertificateConfigImpl( const envoy::extensions::transport_sockets::tls::v3::TlsCertificate& config, - Server::Configuration::TransportSocketFactoryContext* factory_context, Api::Api& api); + Server::Configuration::TransportSocketFactoryContext& 
factory_context, Api::Api& api); const std::string& certificateChain() const override { return certificate_chain_; } const std::string& certificateChainPath() const override { return certificate_chain_path_; } diff --git a/source/common/stats/allocator_impl.cc b/source/common/stats/allocator_impl.cc index 3c64b6ba49511..4464f41a344e6 100644 --- a/source/common/stats/allocator_impl.cc +++ b/source/common/stats/allocator_impl.cc @@ -1,5 +1,6 @@ #include "source/common/stats/allocator_impl.h" +#include #include #include "envoy/stats/stats.h" @@ -25,6 +26,25 @@ const char AllocatorImpl::DecrementToZeroSyncPoint[] = "decrement-zero"; AllocatorImpl::~AllocatorImpl() { ASSERT(counters_.empty()); ASSERT(gauges_.empty()); + +#ifndef NDEBUG + // Move deleted stats into the sets for the ASSERTs in removeFromSetLockHeld to function. + for (auto& counter : deleted_counters_) { + auto insertion = counters_.insert(counter.get()); + // Assert that there were no duplicates. + ASSERT(insertion.second); + } + for (auto& gauge : deleted_gauges_) { + auto insertion = gauges_.insert(gauge.get()); + // Assert that there were no duplicates. + ASSERT(insertion.second); + } + for (auto& text_readout : deleted_text_readouts_) { + auto insertion = text_readouts_.insert(text_readout.get()); + // Assert that there were no duplicates. 
+ ASSERT(insertion.second); + } +#endif } #ifndef ENVOY_CONFIG_COVERAGE @@ -316,5 +336,77 @@ Counter* AllocatorImpl::makeCounterInternal(StatName name, StatName tag_extracte return new CounterImpl(name, *this, tag_extracted_name, stat_name_tags); } +void AllocatorImpl::forEachCounter(std::function f_size, + std::function f_stat) const { + Thread::LockGuard lock(mutex_); + if (f_size != nullptr) { + f_size(counters_.size()); + } + for (auto& counter : counters_) { + f_stat(*counter); + } +} + +void AllocatorImpl::forEachGauge(std::function f_size, + std::function f_stat) const { + Thread::LockGuard lock(mutex_); + if (f_size != nullptr) { + f_size(gauges_.size()); + } + for (auto& gauge : gauges_) { + f_stat(*gauge); + } +} + +void AllocatorImpl::forEachTextReadout(std::function f_size, + std::function f_stat) const { + Thread::LockGuard lock(mutex_); + if (f_size != nullptr) { + f_size(text_readouts_.size()); + } + for (auto& text_readout : text_readouts_) { + f_stat(*text_readout); + } +} + +void AllocatorImpl::markCounterForDeletion(const CounterSharedPtr& counter) { + Thread::LockGuard lock(mutex_); + auto iter = counters_.find(counter->statName()); + if (iter == counters_.end()) { + // This has already been marked for deletion. + return; + } + ASSERT(counter.get() == *iter); + // Duplicates are ASSERTed in ~AllocatorImpl. + deleted_counters_.emplace_back(*iter); + counters_.erase(iter); +} + +void AllocatorImpl::markGaugeForDeletion(const GaugeSharedPtr& gauge) { + Thread::LockGuard lock(mutex_); + auto iter = gauges_.find(gauge->statName()); + if (iter == gauges_.end()) { + // This has already been marked for deletion. + return; + } + ASSERT(gauge.get() == *iter); + // Duplicates are ASSERTed in ~AllocatorImpl. 
+ deleted_gauges_.emplace_back(*iter); + gauges_.erase(iter); +} + +void AllocatorImpl::markTextReadoutForDeletion(const TextReadoutSharedPtr& text_readout) { + Thread::LockGuard lock(mutex_); + auto iter = text_readouts_.find(text_readout->statName()); + if (iter == text_readouts_.end()) { + // This has already been marked for deletion. + return; + } + ASSERT(text_readout.get() == *iter); + // Duplicates are ASSERTed in ~AllocatorImpl. + deleted_text_readouts_.emplace_back(*iter); + text_readouts_.erase(iter); +} + } // namespace Stats } // namespace Envoy diff --git a/source/common/stats/allocator_impl.h b/source/common/stats/allocator_impl.h index db656e2e40134..806e7dc8612fd 100644 --- a/source/common/stats/allocator_impl.h +++ b/source/common/stats/allocator_impl.h @@ -33,6 +33,15 @@ class AllocatorImpl : public Allocator { SymbolTable& symbolTable() override { return symbol_table_; } const SymbolTable& constSymbolTable() const override { return symbol_table_; } + void forEachCounter(std::function, + std::function) const override; + + void forEachGauge(std::function, + std::function) const override; + + void forEachTextReadout(std::function, + std::function) const override; + #ifndef ENVOY_CONFIG_COVERAGE void debugPrint(); #endif @@ -47,6 +56,10 @@ class AllocatorImpl : public Allocator { */ bool isMutexLockedForTest(); + void markCounterForDeletion(const CounterSharedPtr& counter) override; + void markGaugeForDeletion(const GaugeSharedPtr& gauge) override; + void markTextReadoutForDeletion(const TextReadoutSharedPtr& text_readout) override; + protected: virtual Counter* makeCounterInternal(StatName name, StatName tag_extracted_name, const StatNameTagVector& stat_name_tags); @@ -58,21 +71,29 @@ class AllocatorImpl : public Allocator { friend class TextReadoutImpl; friend class NotifyingAllocatorImpl; - void removeCounterFromSetLockHeld(Counter* counter) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); - void removeGaugeFromSetLockHeld(Gauge* gauge) 
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); - void removeTextReadoutFromSetLockHeld(Counter* counter) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + // A mutex is needed here to protect both the stats_ object from both + // alloc() and free() operations. Although alloc() operations are called under existing locking, + // free() operations are made from the destructors of the individual stat objects, which are not + // protected by locks. + mutable Thread::MutexBasicLockable mutex_; StatSet counters_ ABSL_GUARDED_BY(mutex_); StatSet gauges_ ABSL_GUARDED_BY(mutex_); StatSet text_readouts_ ABSL_GUARDED_BY(mutex_); - SymbolTable& symbol_table_; + // Retain storage for deleted stats; these are no longer in maps because + // the matcher-pattern was established after they were created. Since the + // stats are held by reference in code that expects them to be there, we + // can't actually delete the stats. + // + // It seems like it would be better to have each client that expects a stat + // to exist to hold it as (e.g.) a CounterSharedPtr rather than a Counter& + // but that would be fairly complex to change. + std::vector deleted_counters_ ABSL_GUARDED_BY(mutex_); + std::vector deleted_gauges_ ABSL_GUARDED_BY(mutex_); + std::vector deleted_text_readouts_ ABSL_GUARDED_BY(mutex_); - // A mutex is needed here to protect both the stats_ object from both - // alloc() and free() operations. Although alloc() operations are called under existing locking, - // free() operations are made from the destructors of the individual stat objects, which are not - // protected by locks. 
- Thread::MutexBasicLockable mutex_; + SymbolTable& symbol_table_; Thread::ThreadSynchronizer sync_; }; diff --git a/source/common/stats/histogram_impl.h b/source/common/stats/histogram_impl.h index 4090434f46edd..03992a06cd7ba 100644 --- a/source/common/stats/histogram_impl.h +++ b/source/common/stats/histogram_impl.h @@ -28,7 +28,8 @@ class HistogramSettingsImpl : public HistogramSettings { static ConstSupportedBuckets& defaultBuckets(); private: - using Config = std::pair; + using Config = std::pair, + ConstSupportedBuckets>; const std::vector configs_{}; }; diff --git a/source/common/stats/isolated_store_impl.h b/source/common/stats/isolated_store_impl.h index f0f924ebda541..ebff944da7eff 100644 --- a/source/common/stats/isolated_store_impl.h +++ b/source/common/stats/isolated_store_impl.h @@ -2,6 +2,7 @@ #include #include +#include #include #include "envoy/stats/stats.h" @@ -100,6 +101,14 @@ template class IsolatedStatsCache { return true; } + void forEachStat(std::function f_size, + std::function f_stat) const { + f_size(stats_.size()); + for (auto const& stat : stats_) { + f_stat(*stat.second); + } + } + private: friend class IsolatedStoreImpl; @@ -205,6 +214,21 @@ class IsolatedStoreImpl : public StoreImpl { return textReadoutFromStatName(storage.statName()); } + void forEachCounter(std::function f_size, + std::function f_stat) const override { + counters_.forEachStat(f_size, f_stat); + } + + void forEachGauge(std::function f_size, + std::function f_stat) const override { + gauges_.forEachStat(f_size, f_stat); + } + + void forEachTextReadout(std::function f_size, + std::function f_stat) const override { + text_readouts_.forEachStat(f_size, f_stat); + } + private: IsolatedStoreImpl(std::unique_ptr&& symbol_table); diff --git a/source/common/stats/stats_matcher_impl.h b/source/common/stats/stats_matcher_impl.h index b9cad0fa35da2..a0d3a085cc3fb 100644 --- a/source/common/stats/stats_matcher_impl.h +++ b/source/common/stats/stats_matcher_impl.h @@ -52,7 +52,7 
@@ class StatsMatcherImpl : public StatsMatcher { OptRef symbol_table_; std::unique_ptr stat_name_pool_; - std::vector matchers_; + std::vector> matchers_; std::vector prefixes_; }; diff --git a/source/common/stats/thread_local_store.cc b/source/common/stats/thread_local_store.cc index 3b29f8b5d4df5..0e88476e5e47f 100644 --- a/source/common/stats/thread_local_store.cc +++ b/source/common/stats/thread_local_store.cc @@ -69,10 +69,19 @@ void ThreadLocalStoreImpl::setStatsMatcher(StatsMatcherPtr&& stats_matcher) { Thread::LockGuard lock(lock_); const uint32_t first_histogram_index = deleted_histograms_.size(); for (ScopeImpl* scope : scopes_) { - removeRejectedStats(scope->central_cache_->counters_, deleted_counters_); - removeRejectedStats(scope->central_cache_->gauges_, deleted_gauges_); + removeRejectedStats(scope->central_cache_->counters_, + [this](const CounterSharedPtr& counter) mutable { + alloc_.markCounterForDeletion(counter); + }); + removeRejectedStats( + scope->central_cache_->gauges_, + [this](const GaugeSharedPtr& gauge) mutable { alloc_.markGaugeForDeletion(gauge); }); removeRejectedStats(scope->central_cache_->histograms_, deleted_histograms_); - removeRejectedStats(scope->central_cache_->text_readouts_, deleted_text_readouts_); + removeRejectedStats( + scope->central_cache_->text_readouts_, + [this](const TextReadoutSharedPtr& text_readout) mutable { + alloc_.markTextReadoutForDeletion(text_readout); + }); } // Remove any newly rejected histograms from histogram_set_. 
@@ -101,6 +110,23 @@ void ThreadLocalStoreImpl::removeRejectedStats(StatMapClass& map, StatListClass& } } +template +void ThreadLocalStoreImpl::removeRejectedStats( + StatNameHashMap& map, std::function f_deletion) { + StatNameVec remove_list; + for (auto& stat : map) { + if (rejects(stat.first)) { + remove_list.push_back(stat.first); + } + } + for (StatName stat_name : remove_list) { + auto iter = map.find(stat_name); + ASSERT(iter != map.end()); + f_deletion(iter->second); + map.erase(iter); + } +} + StatsMatcher::FastResult ThreadLocalStoreImpl::fastRejects(StatName stat_name) const { return stats_matcher_->fastRejects(stat_name); } @@ -113,16 +139,9 @@ bool ThreadLocalStoreImpl::slowRejects(StatsMatcher::FastResult fast_reject_resu std::vector ThreadLocalStoreImpl::counters() const { // Handle de-dup due to overlapping scopes. std::vector ret; - StatNameHashSet names; - Thread::LockGuard lock(lock_); - for (ScopeImpl* scope : scopes_) { - for (auto& counter : scope->central_cache_->counters_) { - if (names.insert(counter.first).second) { - ret.push_back(counter.second); - } - } - } - + forEachCounter( + [&ret](std::size_t size) mutable { ret.reserve(size); }, + [&ret](Counter& counter) mutable { ret.emplace_back(CounterSharedPtr(&counter)); }); return ret; } @@ -141,34 +160,22 @@ ScopePtr ThreadLocalStoreImpl::scopeFromStatName(StatName name) { std::vector ThreadLocalStoreImpl::gauges() const { // Handle de-dup due to overlapping scopes. 
std::vector ret; - StatNameHashSet names; - Thread::LockGuard lock(lock_); - for (ScopeImpl* scope : scopes_) { - for (auto& gauge_iter : scope->central_cache_->gauges_) { - const GaugeSharedPtr& gauge = gauge_iter.second; - if (gauge->importMode() != Gauge::ImportMode::Uninitialized && - names.insert(gauge_iter.first).second) { - ret.push_back(gauge); - } - } - } - + forEachGauge([&ret](std::size_t size) mutable { ret.reserve(size); }, + [&ret](Gauge& gauge) mutable { + if (gauge.importMode() != Gauge::ImportMode::Uninitialized) { + ret.emplace_back(GaugeSharedPtr(&gauge)); + } + }); return ret; } std::vector ThreadLocalStoreImpl::textReadouts() const { // Handle de-dup due to overlapping scopes. std::vector ret; - StatNameHashSet names; - Thread::LockGuard lock(lock_); - for (ScopeImpl* scope : scopes_) { - for (auto& text_readout : scope->central_cache_->text_readouts_) { - if (names.insert(text_readout.first).second) { - ret.push_back(text_readout.second); - } - } - } - + forEachTextReadout([&ret](std::size_t size) mutable { ret.reserve(size); }, + [&ret](TextReadout& text_readout) mutable { + ret.emplace_back(TextReadoutSharedPtr(&text_readout)); + }); return ret; } @@ -975,5 +982,24 @@ bool ParentHistogramImpl::usedLockHeld() const { return false; } +void ThreadLocalStoreImpl::forEachCounter(std::function f_size, + std::function f_stat) const { + Thread::LockGuard lock(lock_); + alloc_.forEachCounter(f_size, f_stat); +} + +void ThreadLocalStoreImpl::forEachGauge(std::function f_size, + std::function f_stat) const { + Thread::LockGuard lock(lock_); + alloc_.forEachGauge(f_size, f_stat); +} + +void ThreadLocalStoreImpl::forEachTextReadout( + std::function f_size, + std::function f_stat) const { + Thread::LockGuard lock(lock_); + alloc_.forEachTextReadout(f_size, f_stat); +} + } // namespace Stats } // namespace Envoy diff --git a/source/common/stats/thread_local_store.h b/source/common/stats/thread_local_store.h index eaf2946fd99f7..742e1fc3c04d6 100644 --- 
a/source/common/stats/thread_local_store.h +++ b/source/common/stats/thread_local_store.h @@ -244,6 +244,15 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo std::vector textReadouts() const override; std::vector histograms() const override; + void forEachCounter(std::function f_size, + std::function f_stat) const override; + + void forEachGauge(std::function f_size, + std::function f_stat) const override; + + void forEachTextReadout(std::function f_size, + std::function f_stat) const override; + // Stats::StoreRoot void addSink(Sink& sink) override { timer_sinks_.push_back(sink); } void setTagProducer(TagProducerPtr&& tag_producer) override { @@ -483,6 +492,9 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo bool rejectsAll() const { return stats_matcher_->rejectsAll(); } template void removeRejectedStats(StatMapClass& map, StatListClass& list); + template + void removeRejectedStats(StatNameHashMap& map, + std::function f_deletion); bool checkAndRememberRejection(StatName name, StatsMatcher::FastResult fast_reject_result, StatNameStorageSet& central_rejected_stats, StatNameHashSet* tls_rejected_stats); @@ -527,10 +539,7 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo // It seems like it would be better to have each client that expects a stat // to exist to hold it as (e.g.) a CounterSharedPtr rather than a Counter& // but that would be fairly complex to change. - std::vector deleted_counters_ ABSL_GUARDED_BY(lock_); - std::vector deleted_gauges_ ABSL_GUARDED_BY(lock_); std::vector deleted_histograms_ ABSL_GUARDED_BY(lock_); - std::vector deleted_text_readouts_ ABSL_GUARDED_BY(lock_); // Scope IDs and central cache entries that are queued for cross-scope release. 
// Because there can be a large number of scopes, all of which are released at once, diff --git a/source/common/stream_info/stream_info_impl.h b/source/common/stream_info/stream_info_impl.h index 53d4351749c10..5abd0c7261ec3 100644 --- a/source/common/stream_info/stream_info_impl.h +++ b/source/common/stream_info/stream_info_impl.h @@ -35,22 +35,25 @@ const ReplacementMap& emptySpaceReplacement() { } // namespace struct StreamInfoImpl : public StreamInfo { - StreamInfoImpl(TimeSource& time_source, - const Network::SocketAddressProviderSharedPtr& downstream_address_provider, - FilterState::LifeSpan life_span = FilterState::LifeSpan::FilterChain) - : StreamInfoImpl(absl::nullopt, time_source, downstream_address_provider, + StreamInfoImpl( + TimeSource& time_source, + const Network::ConnectionInfoProviderSharedPtr& downstream_connection_info_provider, + FilterState::LifeSpan life_span = FilterState::LifeSpan::FilterChain) + : StreamInfoImpl(absl::nullopt, time_source, downstream_connection_info_provider, std::make_shared(life_span)) {} - StreamInfoImpl(Http::Protocol protocol, TimeSource& time_source, - const Network::SocketAddressProviderSharedPtr& downstream_address_provider) - : StreamInfoImpl(protocol, time_source, downstream_address_provider, + StreamInfoImpl( + Http::Protocol protocol, TimeSource& time_source, + const Network::ConnectionInfoProviderSharedPtr& downstream_connection_info_provider) + : StreamInfoImpl(protocol, time_source, downstream_connection_info_provider, std::make_shared(FilterState::LifeSpan::FilterChain)) {} - StreamInfoImpl(Http::Protocol protocol, TimeSource& time_source, - const Network::SocketAddressProviderSharedPtr& downstream_address_provider, - FilterStateSharedPtr parent_filter_state, FilterState::LifeSpan life_span) + StreamInfoImpl( + Http::Protocol protocol, TimeSource& time_source, + const Network::ConnectionInfoProviderSharedPtr& downstream_connection_info_provider, + FilterStateSharedPtr parent_filter_state, 
FilterState::LifeSpan life_span) : StreamInfoImpl( - protocol, time_source, downstream_address_provider, + protocol, time_source, downstream_connection_info_provider, std::make_shared( FilterStateImpl::LazyCreateAncestor(std::move(parent_filter_state), life_span), FilterState::LifeSpan::FilterChain)) {} @@ -68,6 +71,10 @@ struct StreamInfoImpl : public StreamInfo { start_time_monotonic_); } + void setUpstreamConnectionId(uint64_t id) override { upstream_connection_id_ = id; } + + absl::optional upstreamConnectionId() const override { return upstream_connection_id_; } + absl::optional lastDownstreamRxByteReceived() const override { return duration(last_downstream_rx_byte_received); } @@ -193,17 +200,8 @@ struct StreamInfoImpl : public StreamInfo { void healthCheck(bool is_health_check) override { health_check_request_ = is_health_check; } - const Network::SocketAddressProvider& downstreamAddressProvider() const override { - return *downstream_address_provider_; - } - - void - setDownstreamSslConnection(const Ssl::ConnectionInfoConstSharedPtr& connection_info) override { - downstream_ssl_info_ = connection_info; - } - - Ssl::ConnectionInfoConstSharedPtr downstreamSslConnection() const override { - return downstream_ssl_info_; + const Network::ConnectionInfoProvider& downstreamAddressProvider() const override { + return *downstream_connection_info_provider_; } void setUpstreamSslConnection(const Ssl::ConnectionInfoConstSharedPtr& connection_info) override { @@ -214,7 +212,7 @@ struct StreamInfoImpl : public StreamInfo { return upstream_ssl_info_; } - const Router::RouteEntry* routeEntry() const override { return route_entry_; } + Router::RouteConstSharedPtr route() const override { return route_; } envoy::config::core::v3::Metadata& dynamicMetadata() override { return metadata_; }; const envoy::config::core::v3::Metadata& dynamicMetadata() const override { return metadata_; }; @@ -260,8 +258,9 @@ struct StreamInfoImpl : public StreamInfo { void dumpState(std::ostream& 
os, int indent_level = 0) const { const char* spaces = spacesForLevel(indent_level); - os << spaces << "StreamInfoImpl " << this << DUMP_OPTIONAL_MEMBER(protocol_) - << DUMP_OPTIONAL_MEMBER(response_code_) << DUMP_OPTIONAL_MEMBER(response_code_details_) + os << spaces << "StreamInfoImpl " << this << DUMP_OPTIONAL_MEMBER(upstream_connection_id_) + << DUMP_OPTIONAL_MEMBER(protocol_) << DUMP_OPTIONAL_MEMBER(response_code_) + << DUMP_OPTIONAL_MEMBER(response_code_details_) << DUMP_OPTIONAL_MEMBER(attempt_count_) << DUMP_MEMBER(health_check_request_) << DUMP_MEMBER(route_name_) << "\n"; } @@ -280,6 +279,10 @@ struct StreamInfoImpl : public StreamInfo { const std::string& filterChainName() const override { return filter_chain_name_; } + void setAttemptCount(uint32_t attempt_count) override { attempt_count_ = attempt_count; } + + absl::optional attemptCount() const override { return attempt_count_; } + TimeSource& time_source_; const SystemTime start_time_; const MonotonicTime start_time_monotonic_; @@ -296,35 +299,37 @@ struct StreamInfoImpl : public StreamInfo { uint64_t response_flags_{}; Upstream::HostDescriptionConstSharedPtr upstream_host_{}; bool health_check_request_{}; - const Router::RouteEntry* route_entry_{}; + Router::RouteConstSharedPtr route_; envoy::config::core::v3::Metadata metadata_{}; FilterStateSharedPtr filter_state_; FilterStateSharedPtr upstream_filter_state_; std::string route_name_; + absl::optional upstream_connection_id_; + absl::optional attempt_count_; private: - static Network::SocketAddressProviderSharedPtr emptyDownstreamAddressProvider() { + static Network::ConnectionInfoProviderSharedPtr emptyDownstreamAddressProvider() { MUTABLE_CONSTRUCT_ON_FIRST_USE( - Network::SocketAddressProviderSharedPtr, - std::make_shared(nullptr, nullptr)); + Network::ConnectionInfoProviderSharedPtr, + std::make_shared(nullptr, nullptr)); } - StreamInfoImpl(absl::optional protocol, TimeSource& time_source, - const Network::SocketAddressProviderSharedPtr& 
downstream_address_provider, - FilterStateSharedPtr filter_state) + StreamInfoImpl( + absl::optional protocol, TimeSource& time_source, + const Network::ConnectionInfoProviderSharedPtr& downstream_connection_info_provider, + FilterStateSharedPtr filter_state) : time_source_(time_source), start_time_(time_source.systemTime()), start_time_monotonic_(time_source.monotonicTime()), protocol_(protocol), filter_state_(std::move(filter_state)), - downstream_address_provider_(downstream_address_provider != nullptr - ? downstream_address_provider - : emptyDownstreamAddressProvider()), + downstream_connection_info_provider_(downstream_connection_info_provider != nullptr + ? downstream_connection_info_provider + : emptyDownstreamAddressProvider()), trace_reason_(Tracing::Reason::NotTraceable) {} uint64_t bytes_received_{}; uint64_t bytes_sent_{}; Network::Address::InstanceConstSharedPtr upstream_local_address_; - const Network::SocketAddressProviderSharedPtr downstream_address_provider_; - Ssl::ConnectionInfoConstSharedPtr downstream_ssl_info_; + const Network::ConnectionInfoProviderSharedPtr downstream_connection_info_provider_; Ssl::ConnectionInfoConstSharedPtr upstream_ssl_info_; std::string requested_server_name_; const Http::RequestHeaderMap* request_headers_{}; diff --git a/source/common/tcp/conn_pool.cc b/source/common/tcp/conn_pool.cc index 38456bf4a5110..9cfa340eff0d0 100644 --- a/source/common/tcp/conn_pool.cc +++ b/source/common/tcp/conn_pool.cc @@ -64,7 +64,6 @@ void ActiveTcpClient::onEvent(Network::ConnectionEvent event) { // This is also necessary for prefetch to be used with such protocols. 
if (event == Network::ConnectionEvent::Connected) { connection_->readDisable(true); - connection_->streamInfo().setDownstreamSslConnection(connection_->ssl()); } Envoy::ConnectionPool::ActiveClient::onEvent(event); if (callbacks_) { diff --git a/source/common/tcp/conn_pool.h b/source/common/tcp/conn_pool.h index 398254b498461..949feeb7ee7a2 100644 --- a/source/common/tcp/conn_pool.h +++ b/source/common/tcp/conn_pool.h @@ -176,10 +176,10 @@ class ConnPoolImpl : public Envoy::ConnectionPool::ConnPoolImplBase, } ConnectionPool::Cancellable* newConnection(Tcp::ConnectionPool::Callbacks& callbacks) override { TcpAttachContext context(&callbacks); - return Envoy::ConnectionPool::ConnPoolImplBase::newStream(context); + return newStreamImpl(context); } bool maybePreconnect(float preconnect_ratio) override { - return Envoy::ConnectionPool::ConnPoolImplBase::maybePreconnect(preconnect_ratio); + return maybePreconnectImpl(preconnect_ratio); } ConnectionPool::Cancellable* diff --git a/source/common/tcp/original_conn_pool.cc b/source/common/tcp/original_conn_pool.cc index cb4bf71b6735e..325c424aa61c6 100644 --- a/source/common/tcp/original_conn_pool.cc +++ b/source/common/tcp/original_conn_pool.cc @@ -225,7 +225,6 @@ void OriginalConnPoolImpl::onConnectionEvent(ActiveConn& conn, Network::Connecti // whether the connection is in the ready list (connected) or the pending list (failed to // connect). 
if (event == Network::ConnectionEvent::Connected) { - conn.conn_->streamInfo().setDownstreamSslConnection(conn.conn_->ssl()); conn_connect_ms_->complete(); processIdleConnection(conn, true, false); } diff --git a/source/common/tcp_proxy/tcp_proxy.cc b/source/common/tcp_proxy/tcp_proxy.cc index a369af7358d16..5cb4675975b97 100644 --- a/source/common/tcp_proxy/tcp_proxy.cc +++ b/source/common/tcp_proxy/tcp_proxy.cc @@ -58,24 +58,24 @@ Config::RouteImpl::RouteImpl( bool Config::RouteImpl::matches(Network::Connection& connection) const { if (!source_port_ranges_.empty() && - !Network::Utility::portInRangeList(*connection.addressProvider().remoteAddress(), + !Network::Utility::portInRangeList(*connection.connectionInfoProvider().remoteAddress(), source_port_ranges_)) { return false; } if (!source_ips_.empty() && - !source_ips_.contains(*connection.addressProvider().remoteAddress())) { + !source_ips_.contains(*connection.connectionInfoProvider().remoteAddress())) { return false; } if (!destination_port_ranges_.empty() && - !Network::Utility::portInRangeList(*connection.addressProvider().localAddress(), + !Network::Utility::portInRangeList(*connection.connectionInfoProvider().localAddress(), destination_port_ranges_)) { return false; } if (!destination_ips_.empty() && - !destination_ips_.contains(*connection.addressProvider().localAddress())) { + !destination_ips_.contains(*connection.connectionInfoProvider().localAddress())) { return false; } @@ -138,13 +138,6 @@ Config::Config(const envoy::extensions::filters::network::tcp_proxy::v3::TcpProx return drain_manager; }); - if (config.has_hidden_envoy_deprecated_deprecated_v1()) { - for (const envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy::DeprecatedV1::TCPRoute& - route_desc : config.hidden_envoy_deprecated_deprecated_v1().routes()) { - routes_.emplace_back(std::make_shared(*this, route_desc)); - } - } - if (!config.cluster().empty()) { 
envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy::DeprecatedV1::TCPRoute default_route; @@ -431,9 +424,9 @@ Network::FilterStatus Filter::initializeUpstreamConnection() { Network::ProxyProtocolFilterState::key())) { read_callbacks_->connection().streamInfo().filterState()->setData( Network::ProxyProtocolFilterState::key(), - std::make_unique( - Network::ProxyProtocolData{downstreamConnection()->addressProvider().remoteAddress(), - downstreamConnection()->addressProvider().localAddress()}), + std::make_unique(Network::ProxyProtocolData{ + downstreamConnection()->connectionInfoProvider().remoteAddress(), + downstreamConnection()->connectionInfoProvider().localAddress()}), StreamInfo::FilterState::StateType::ReadOnly, StreamInfo::FilterState::LifeSpan::Connection); } @@ -485,6 +478,7 @@ bool Filter::maybeTunnel(Upstream::ThreadLocalCluster& cluster) { if (generic_conn_pool_) { connecting_ = true; connect_attempts_++; + getStreamInfo().setAttemptCount(connect_attempts_); generic_conn_pool_->newStream(*this); // Because we never return open connections to the pool, this either has a handle waiting on // connection completion, or onPoolFailure has been invoked. Either way, stop iteration. 
diff --git a/source/common/tcp_proxy/tcp_proxy.h b/source/common/tcp_proxy/tcp_proxy.h index 7e22c3273bc62..b157db0e1b845 100644 --- a/source/common/tcp_proxy/tcp_proxy.h +++ b/source/common/tcp_proxy/tcp_proxy.h @@ -267,8 +267,8 @@ class Filter : public Network::ReadFilter, auto hash_policy = config_->hashPolicy(); if (hash_policy) { return hash_policy->generateHash( - downstreamConnection()->addressProvider().remoteAddress().get(), - downstreamConnection()->addressProvider().localAddress().get()); + downstreamConnection()->connectionInfoProvider().remoteAddress().get(), + downstreamConnection()->connectionInfoProvider().localAddress().get()); } return {}; diff --git a/source/common/tcp_proxy/upstream.cc b/source/common/tcp_proxy/upstream.cc index 9aadc83d159f9..8a66a0ad8161b 100644 --- a/source/common/tcp_proxy/upstream.cc +++ b/source/common/tcp_proxy/upstream.cc @@ -185,9 +185,10 @@ void TcpConnPool::onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn_data Network::Connection& connection = conn_data->connection(); auto upstream = std::make_unique(std::move(conn_data), upstream_callbacks_); - callbacks_->onGenericPoolReady(&connection.streamInfo(), std::move(upstream), host, - latched_data->connection().addressProvider().localAddress(), - latched_data->connection().streamInfo().downstreamSslConnection()); + callbacks_->onGenericPoolReady( + &connection.streamInfo(), std::move(upstream), host, + latched_data->connection().connectionInfoProvider().localAddress(), + latched_data->connection().streamInfo().downstreamAddressProvider().sslConnection()); } HttpConnPool::HttpConnPool(Upstream::ThreadLocalCluster& thread_local_cluster, @@ -233,8 +234,8 @@ void HttpConnPool::onPoolReady(Http::RequestEncoder& request_encoder, upstream_handle_ = nullptr; upstream_->setRequestEncoder(request_encoder, host->transportSocketFactory().implementsSecureTransport()); - upstream_->setConnPoolCallbacks( - std::make_unique(*this, host, info.downstreamSslConnection())); + 
upstream_->setConnPoolCallbacks(std::make_unique( + *this, host, info.downstreamAddressProvider().sslConnection())); } void HttpConnPool::onGenericPoolReady(Upstream::HostDescriptionConstSharedPtr& host, diff --git a/source/common/tracing/http_tracer_impl.cc b/source/common/tracing/http_tracer_impl.cc index 7df66b8189d18..84e7168a68e92 100644 --- a/source/common/tracing/http_tracer_impl.cc +++ b/source/common/tracing/http_tracer_impl.cc @@ -306,7 +306,7 @@ absl::string_view RequestHeaderCustomTag::value(const CustomTagContext& ctx) con return default_value_; } // TODO(https://github.com/envoyproxy/envoy/issues/13454): Potentially populate all header values. - const auto entry = ctx.trace_context->getTraceContext(name_); + const auto entry = ctx.trace_context->getByKey(name_); return entry.value_or(default_value_); } @@ -355,8 +355,8 @@ MetadataCustomTag::metadata(const CustomTagContext& ctx) const { case envoy::type::metadata::v3::MetadataKind::KindCase::kRequest: return &info.dynamicMetadata(); case envoy::type::metadata::v3::MetadataKind::KindCase::kRoute: { - const Router::RouteEntry* route_entry = info.routeEntry(); - return route_entry ? &route_entry->metadata() : nullptr; + Router::RouteConstSharedPtr route = info.route(); + return route ? 
&route->metadata() : nullptr; } case envoy::type::metadata::v3::MetadataKind::KindCase::kCluster: { const auto& hostPtr = info.upstreamHost(); diff --git a/source/common/upstream/BUILD b/source/common/upstream/BUILD index 13dcb3846af3b..cca323285187e 100644 --- a/source/common/upstream/BUILD +++ b/source/common/upstream/BUILD @@ -70,7 +70,6 @@ envoy_cc_library( "//source/common/config:grpc_mux_lib", "//source/common/config:subscription_factory_lib", "//source/common/config:utility_lib", - "//source/common/config:version_converter_lib", "//source/common/config:xds_resource_lib", "//source/common/grpc:async_client_manager_lib", "//source/common/http:async_client_lib", @@ -148,9 +147,17 @@ envoy_cc_library( ) envoy_cc_library( - name = "edf_scheduler_lib", - hdrs = ["edf_scheduler.h"], - deps = ["//source/common/common:assert_lib"], + name = "scheduler_lib", + hdrs = [ + "edf_scheduler.h", + "wrsq_scheduler.h", + ], + deps = [ + "//envoy/common:random_generator_interface", + "//envoy/upstream:scheduler_interface", + "//source/common/common:assert_lib", + "//source/common/common:minimal_logger_lib", + ], ) envoy_cc_library( @@ -198,7 +205,7 @@ envoy_cc_library( srcs = ["load_balancer_impl.cc"], hdrs = ["load_balancer_impl.h"], deps = [ - ":edf_scheduler_lib", + ":scheduler_lib", "//envoy/common:random_generator_interface", "//envoy/runtime:runtime_interface", "//envoy/stats:stats_interface", @@ -211,6 +218,14 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "load_balancer_factory_base_lib", + hdrs = ["load_balancer_factory_base.h"], + deps = [ + ":load_balancer_lib", + ], +) + envoy_cc_library( name = "load_stats_reporter_lib", srcs = ["load_stats_reporter.cc"], @@ -220,7 +235,6 @@ envoy_cc_library( "//envoy/stats:stats_macros", "//envoy/upstream:cluster_manager_interface", "//source/common/common:minimal_logger_lib", - "//source/common/config:version_converter_lib", "//source/common/grpc:async_client_lib", "@envoy_api//envoy/service/load_stats/v3:pkg_cc_proto", 
], @@ -255,7 +269,6 @@ envoy_cc_library( "//source/common/common:backoff_lib", "//source/common/common:minimal_logger_lib", "//source/common/config:utility_lib", - "//source/common/config:version_converter_lib", "//source/common/grpc:async_client_lib", "//source/common/network:resolver_lib", "//source/common/protobuf:message_validator_lib", @@ -409,12 +422,10 @@ envoy_cc_library( "//source/common/config:subscription_base_interface", "//source/common/config:subscription_factory_lib", "//source/common/config:utility_lib", - "//source/common/config:version_converter_lib", "//source/common/network:address_lib", "//source/common/network:resolver_lib", "//source/common/network:utility_lib", "//source/common/protobuf:utility_lib", - "@envoy_api//envoy/api/v2:pkg_cc_proto", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto", @@ -422,6 +433,29 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "leds_lib", + srcs = ["leds.cc"], + hdrs = ["leds.h"], + deps = [ + ":upstream_includes", + "//envoy/config:grpc_mux_interface", + "//envoy/config:subscription_factory_interface", + "//envoy/config:subscription_interface", + "//envoy/local_info:local_info_interface", + "//source/common/config:decoded_resource_lib", + "//source/common/config:subscription_base_interface", + "//source/common/config:subscription_factory_lib", + "//source/common/config:utility_lib", + "//source/common/network:address_lib", + "//source/common/network:resolver_lib", + "//source/common/network:utility_lib", + "//source/common/protobuf:utility_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + "@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto", + ], +) + envoy_cc_library( name = "subset_lb_lib", srcs = ["subset_lb.cc"], @@ -472,6 +506,7 @@ envoy_cc_library( "//source/common/http/http2:codec_stats_lib", "//source/common/http:utility_lib", "//source/common/network:address_lib", + 
"//source/common/network:happy_eyeballs_connection_impl_lib", "//source/common/network:resolver_lib", "//source/common/network:socket_option_factory_lib", "//source/common/network:socket_option_lib", diff --git a/source/common/upstream/cds_api_helper.cc b/source/common/upstream/cds_api_helper.cc index c75b1526d2f08..ddfb2f6a78d08 100644 --- a/source/common/upstream/cds_api_helper.cc +++ b/source/common/upstream/cds_api_helper.cc @@ -19,9 +19,8 @@ CdsApiHelper::onConfigUpdate(const std::vector& adde const std::string& system_version_info) { Config::ScopedResume maybe_resume_eds; if (cm_.adsMux()) { - const auto type_urls = - Config::getAllVersionTypeUrls(); - maybe_resume_eds = cm_.adsMux()->pause(type_urls); + const auto type_url = Config::getTypeUrl(); + maybe_resume_eds = cm_.adsMux()->pause(type_url); } ENVOY_LOG(info, "{}: add {} cluster(s), remove {} cluster(s)", name_, added_resources.size(), diff --git a/source/common/upstream/cds_api_impl.cc b/source/common/upstream/cds_api_impl.cc index 3c9d8ab46b6dc..64c2c5a25cceb 100644 --- a/source/common/upstream/cds_api_impl.cc +++ b/source/common/upstream/cds_api_impl.cc @@ -20,8 +20,8 @@ CdsApiImpl::CdsApiImpl(const envoy::config::core::v3::ConfigSource& cds_config, const xds::core::v3::ResourceLocator* cds_resources_locator, ClusterManager& cm, Stats::Scope& scope, ProtobufMessage::ValidationVisitor& validation_visitor) - : Envoy::Config::SubscriptionBase( - cds_config.resource_api_version(), validation_visitor, "name"), + : Envoy::Config::SubscriptionBase(validation_visitor, + "name"), helper_(cm, "cds"), cm_(cm), scope_(scope.createScope("cluster_manager.cds.")) { const auto resource_name = getResourceName(); if (cds_resources_locator == nullptr) { diff --git a/source/common/upstream/cluster_factory_impl.cc b/source/common/upstream/cluster_factory_impl.cc index ab599bd092b66..2e90917e28b56 100644 --- a/source/common/upstream/cluster_factory_impl.cc +++ b/source/common/upstream/cluster_factory_impl.cc @@ -123,13 
+123,16 @@ std::pair ClusterFactoryImplBase::create(const envoy::config::cluster::v3::Cluster& cluster, ClusterFactoryContext& context) { auto stats_scope = generateStatsScope(cluster, context.stats()); - Server::Configuration::TransportSocketFactoryContextImpl factory_context( - context.admin(), context.sslContextManager(), *stats_scope, context.clusterManager(), - context.localInfo(), context.dispatcher(), context.stats(), context.singletonManager(), - context.tls(), context.messageValidationVisitor(), context.api(), context.options()); + std::unique_ptr + transport_factory_context = + std::make_unique( + context.admin(), context.sslContextManager(), *stats_scope, context.clusterManager(), + context.localInfo(), context.dispatcher(), context.stats(), + context.singletonManager(), context.tls(), context.messageValidationVisitor(), + context.api(), context.options()); std::pair new_cluster_pair = - createClusterImpl(cluster, context, factory_context, std::move(stats_scope)); + createClusterImpl(cluster, context, *transport_factory_context, std::move(stats_scope)); if (!cluster.health_checks().empty()) { // TODO(htuch): Need to support multiple health checks in v2. 
@@ -146,6 +149,8 @@ ClusterFactoryImplBase::create(const envoy::config::cluster::v3::Cluster& cluste new_cluster_pair.first->setOutlierDetector(Outlier::DetectorImplFactory::createForCluster( *new_cluster_pair.first, cluster, context.dispatcher(), context.runtime(), context.outlierEventLogger())); + + new_cluster_pair.first->setTransportFactoryContext(std::move(transport_factory_context)); return new_cluster_pair; } diff --git a/source/common/upstream/cluster_factory_impl.h b/source/common/upstream/cluster_factory_impl.h index acae527f57268..a1a2020cc834e 100644 --- a/source/common/upstream/cluster_factory_impl.h +++ b/source/common/upstream/cluster_factory_impl.h @@ -43,6 +43,7 @@ #include "source/common/upstream/outlier_detection_impl.h" #include "source/common/upstream/resource_manager_impl.h" #include "source/common/upstream/upstream_impl.h" +#include "source/server/transport_socket_config_impl.h" namespace Envoy { namespace Upstream { @@ -175,9 +176,9 @@ template class ConfigurableClusterFactoryBase : public Clust Server::Configuration::TransportSocketFactoryContextImpl& socket_factory_context, Stats::ScopePtr&& stats_scope) override { ProtobufTypes::MessagePtr config = createEmptyConfigProto(); - Config::Utility::translateOpaqueConfig( - cluster.cluster_type().typed_config(), ProtobufWkt::Struct::default_instance(), - socket_factory_context.messageValidationVisitor(), *config); + Config::Utility::translateOpaqueConfig(cluster.cluster_type().typed_config(), + socket_factory_context.messageValidationVisitor(), + *config); return createClusterWithConfig(cluster, MessageUtil::downcastAndValidate( *config, context.messageValidationVisitor()), diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index 42b49a1cd751f..f2c9ff1271b99 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -23,7 +23,6 @@ #include "source/common/common/utility.h" #include 
"source/common/config/new_grpc_mux_impl.h" #include "source/common/config/utility.h" -#include "source/common/config/version_converter.h" #include "source/common/config/xds_resource.h" #include "source/common/grpc/async_client_manager_impl.h" #include "source/common/http/async_client_impl.h" @@ -185,9 +184,9 @@ void ClusterManagerInitHelper::maybeFinishInitialize() { // avoid double pause ClusterLoadAssignment. Config::ScopedResume maybe_resume_eds; if (cm_.adsMux()) { - const auto type_urls = - Config::getAllVersionTypeUrls(); - maybe_resume_eds = cm_.adsMux()->pause(type_urls); + const auto type_url = + Config::getTypeUrl(); + maybe_resume_eds = cm_.adsMux()->pause(type_url); } initializeSecondaryClusters(); } @@ -338,22 +337,18 @@ ClusterManagerImpl::ClusterManagerImpl( if (dyn_resources.has_ads_config()) { if (dyn_resources.ads_config().api_type() == envoy::config::core::v3::ApiConfigSource::DELTA_GRPC) { + Config::Utility::checkTransportVersion(dyn_resources.ads_config()); ads_mux_ = std::make_shared( Config::Utility::factoryForGrpcApiConfigSource(*async_client_manager_, dyn_resources.ads_config(), stats, false) ->createUncachedRawAsyncClient(), main_thread_dispatcher, *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( - Config::Utility::getAndCheckTransportVersion(dyn_resources.ads_config()) == - envoy::config::core::v3::ApiVersion::V3 - // TODO(htuch): consolidate with type_to_endpoint.cc, once we sort out the future - // direction of that module re: https://github.com/envoyproxy/envoy/issues/10650. - ? "envoy.service.discovery.v3.AggregatedDiscoveryService.DeltaAggregatedResources" - : "envoy.service.discovery.v2.AggregatedDiscoveryService." 
- "DeltaAggregatedResources"), - Config::Utility::getAndCheckTransportVersion(dyn_resources.ads_config()), random_, stats_, + "envoy.service.discovery.v3.AggregatedDiscoveryService.DeltaAggregatedResources"), + random_, stats_, Envoy::Config::Utility::parseRateLimitSettings(dyn_resources.ads_config()), local_info); } else { + Config::Utility::checkTransportVersion(dyn_resources.ads_config()); ads_mux_ = std::make_shared( local_info, Config::Utility::factoryForGrpcApiConfigSource(*async_client_manager_, @@ -361,15 +356,8 @@ ClusterManagerImpl::ClusterManagerImpl( ->createUncachedRawAsyncClient(), main_thread_dispatcher, *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( - Config::Utility::getAndCheckTransportVersion(dyn_resources.ads_config()) == - envoy::config::core::v3::ApiVersion::V3 - // TODO(htuch): consolidate with type_to_endpoint.cc, once we sort out the future - // direction of that module re: https://github.com/envoyproxy/envoy/issues/10650. - ? "envoy.service.discovery.v3.AggregatedDiscoveryService." - "StreamAggregatedResources" - : "envoy.service.discovery.v2.AggregatedDiscoveryService." 
- "StreamAggregatedResources"), - Config::Utility::getAndCheckTransportVersion(dyn_resources.ads_config()), random_, stats_, + "envoy.service.discovery.v3.AggregatedDiscoveryService.StreamAggregatedResources"), + random_, stats_, Envoy::Config::Utility::parseRateLimitSettings(dyn_resources.ads_config()), bootstrap.dynamic_resources().ads_config().set_node_on_first_message_only()); } @@ -444,12 +432,13 @@ void ClusterManagerImpl::initializeSecondaryClusters( if (cm_config.has_load_stats_config()) { const auto& load_stats_config = cm_config.load_stats_config(); + Config::Utility::checkTransportVersion(load_stats_config); load_stats_reporter_ = std::make_unique( local_info_, *this, stats_, Config::Utility::factoryForGrpcApiConfigSource(*async_client_manager_, load_stats_config, stats_, false) ->createUncachedRawAsyncClient(), - Config::Utility::getAndCheckTransportVersion(load_stats_config), dispatcher_); + dispatcher_); } } @@ -485,7 +474,7 @@ void ClusterManagerImpl::onClusterInit(ClusterManagerCluster& cm_cluster) { if (cluster.info()->lbConfig().close_connections_on_host_set_change()) { for (const auto& host_set : cluster.prioritySet().hostSetsPerPriority()) { // This will drain all tcp and http connection pools. - postThreadLocalDrainConnections(cluster, host_set->hosts()); + postThreadLocalRemoveHosts(cluster, host_set->hosts()); } } else { // TODO(snowp): Should this be subject to merge windows? @@ -495,7 +484,7 @@ void ClusterManagerImpl::onClusterInit(ClusterManagerCluster& cm_cluster) { // enabled, this case will be covered by first `if` statement, where all // connection pools are drained. 
if (!hosts_removed.empty()) { - postThreadLocalDrainConnections(cluster, hosts_removed); + postThreadLocalRemoveHosts(cluster, hosts_removed); } } }); @@ -793,6 +782,9 @@ ClusterManagerImpl::loadCluster(const envoy::config::cluster::v3::Cluster& clust if (new_cluster->outlierDetector() != nullptr) { new_cluster->outlierDetector()->addChangedStateCb([this](HostSharedPtr host) { if (host->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)) { + ENVOY_LOG_EVENT(debug, "outlier_detection_ejection", + "host {} in cluster {} was ejected by the outlier detector", + host->address(), host->cluster().name()); postThreadLocalHealthFailure(host); } }); @@ -831,6 +823,13 @@ ClusterManagerImpl::loadCluster(const envoy::config::cluster::v3::Cluster& clust } } else if (cluster_reference.info()->lbType() == LoadBalancerType::ClusterProvided) { cluster_entry_it->second->thread_aware_lb_ = std::move(new_cluster_pair.second); + } else if (cluster_reference.info()->lbType() == LoadBalancerType::LoadBalancingPolicyConfig) { + const auto& policy = cluster_reference.info()->loadBalancingPolicy(); + TypedLoadBalancerFactory* typed_lb_factory = cluster_reference.info()->loadBalancerFactory(); + RELEASE_ASSERT(typed_lb_factory != nullptr, "ClusterInfo should contain a valid factory"); + cluster_entry_it->second->thread_aware_lb_ = + typed_lb_factory->create(cluster_reference.prioritySet(), cluster_reference.info()->stats(), + cluster_reference.info()->statsScope(), runtime_, random_, policy); } updateClusterCounts(); @@ -852,10 +851,10 @@ void ClusterManagerImpl::updateClusterCounts() { const bool all_clusters_initialized = init_helper_.state() == ClusterManagerInitHelper::State::AllClustersInitialized; if (all_clusters_initialized && ads_mux_) { - const auto type_urls = Config::getAllVersionTypeUrls(); + const auto type_url = Config::getTypeUrl(); const uint64_t previous_warming = cm_stats_.warming_clusters_.value(); if (previous_warming == 0 && !warming_clusters_.empty()) { - resume_cds_ 
= ads_mux_->pause(type_urls); + resume_cds_ = ads_mux_->pause(type_url); } else if (previous_warming > 0 && warming_clusters_.empty()) { ASSERT(resume_cds_ != nullptr); resume_cds_.reset(); @@ -880,7 +879,7 @@ void ClusterManagerImpl::maybePreconnect( ThreadLocalClusterManagerImpl::ClusterEntry& cluster_entry, const ClusterConnectivityState& state, std::function pick_preconnect_pool) { - auto peekahead_ratio = cluster_entry.cluster_info_->peekaheadRatio(); + auto peekahead_ratio = cluster_entry.info()->peekaheadRatio(); if (peekahead_ratio <= 1.0) { return; } @@ -913,7 +912,7 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::httpConnPool( ResourcePriority priority, absl::optional protocol, LoadBalancerContext* context) { // Select a host and create a connection pool for it if it does not already exist. - auto pool = connPool(priority, protocol, context, false); + auto pool = httpConnPoolImpl(priority, protocol, context, false); if (pool == nullptr) { return absl::nullopt; } @@ -923,7 +922,7 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::httpConnPool( // Now that a new stream is being established, attempt to preconnect. maybePreconnect(*this, parent_.cluster_manager_state_, [this, &priority, &protocol, &context]() { - return connPool(priority, protocol, context, true); + return httpConnPoolImpl(priority, protocol, context, true); }); }, pool); @@ -934,7 +933,7 @@ absl::optional ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::tcpConnPool( ResourcePriority priority, LoadBalancerContext* context) { // Select a host and create a connection pool for it if it does not already exist. 
- auto pool = tcpConnPool(priority, context, false); + auto pool = tcpConnPoolImpl(priority, context, false); if (pool == nullptr) { return absl::nullopt; } @@ -942,15 +941,38 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::tcpConnPool( TcpPoolData data( [this, priority, context]() -> void { maybePreconnect(*this, parent_.cluster_manager_state_, [this, &priority, &context]() { - return tcpConnPool(priority, context, true); + return tcpConnPoolImpl(priority, context, true); }); }, pool); return data; } -void ClusterManagerImpl::postThreadLocalDrainConnections(const Cluster& cluster, - const HostVector& hosts_removed) { +void ClusterManagerImpl::drainConnections(const std::string& cluster) { + ENVOY_LOG_EVENT(debug, "drain_connections_call", "drainConnections called for cluster {}", + cluster); + + tls_.runOnAllThreads([cluster](OptRef cluster_manager) { + auto cluster_entry = cluster_manager->thread_local_clusters_.find(cluster); + if (cluster_entry != cluster_manager->thread_local_clusters_.end()) { + cluster_entry->second->drainAllConnPools(); + } + }); +} + +void ClusterManagerImpl::drainConnections() { + ENVOY_LOG_EVENT(debug, "drain_connections_call_for_all_clusters", + "drainConnections called for all clusters"); + + tls_.runOnAllThreads([](OptRef cluster_manager) { + for (const auto& cluster_entry : cluster_manager->thread_local_clusters_) { + cluster_entry.second->drainAllConnPools(); + } + }); +} + +void ClusterManagerImpl::postThreadLocalRemoveHosts(const Cluster& cluster, + const HostVector& hosts_removed) { tls_.runOnAllThreads([name = cluster.info()->name(), hosts_removed](OptRef cluster_manager) { cluster_manager->removeHosts(name, hosts_removed); @@ -978,35 +1000,37 @@ void ClusterManagerImpl::postThreadLocalClusterUpdate(ClusterManagerCluster& cm_ per_priority.overprovisioning_factor_ = host_set->overprovisioningFactor(); } - tls_.runOnAllThreads( - [info = cm_cluster.cluster().info(), params = std::move(params), 
add_or_update_cluster, - load_balancer_factory](OptRef cluster_manager) { - ThreadLocalClusterManagerImpl::ClusterEntry* new_cluster = nullptr; - if (add_or_update_cluster) { - if (cluster_manager->thread_local_clusters_.count(info->name()) > 0) { - ENVOY_LOG(debug, "updating TLS cluster {}", info->name()); - } else { - ENVOY_LOG(debug, "adding TLS cluster {}", info->name()); - } + HostMapConstSharedPtr host_map = cm_cluster.cluster().prioritySet().crossPriorityHostMap(); - new_cluster = new ThreadLocalClusterManagerImpl::ClusterEntry(*cluster_manager, info, - load_balancer_factory); - cluster_manager->thread_local_clusters_[info->name()].reset(new_cluster); - } + tls_.runOnAllThreads([info = cm_cluster.cluster().info(), params = std::move(params), + add_or_update_cluster, load_balancer_factory, map = std::move(host_map)]( + OptRef cluster_manager) { + ThreadLocalClusterManagerImpl::ClusterEntry* new_cluster = nullptr; + if (add_or_update_cluster) { + if (cluster_manager->thread_local_clusters_.count(info->name()) > 0) { + ENVOY_LOG(debug, "updating TLS cluster {}", info->name()); + } else { + ENVOY_LOG(debug, "adding TLS cluster {}", info->name()); + } - for (const auto& per_priority : params.per_priority_update_params_) { - cluster_manager->updateClusterMembership( - info->name(), per_priority.priority_, per_priority.update_hosts_params_, - per_priority.locality_weights_, per_priority.hosts_added_, - per_priority.hosts_removed_, per_priority.overprovisioning_factor_); - } + new_cluster = new ThreadLocalClusterManagerImpl::ClusterEntry(*cluster_manager, info, + load_balancer_factory); + cluster_manager->thread_local_clusters_[info->name()].reset(new_cluster); + } - if (new_cluster != nullptr) { - for (auto& cb : cluster_manager->update_callbacks_) { - cb->onClusterAddOrUpdate(*new_cluster); - } - } - }); + for (const auto& per_priority : params.per_priority_update_params_) { + cluster_manager->updateClusterMembership( + info->name(), per_priority.priority_, 
per_priority.update_hosts_params_, + per_priority.locality_weights_, per_priority.hosts_added_, per_priority.hosts_removed_, + per_priority.overprovisioning_factor_, map); + } + + if (new_cluster != nullptr) { + for (auto& cb : cluster_manager->update_callbacks_) { + cb->onClusterAddOrUpdate(*new_cluster); + } + } + }); } void ClusterManagerImpl::postThreadLocalHealthFailure(const HostSharedPtr& host) { @@ -1042,6 +1066,29 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::httpAsyncClient return http_async_client_; } +void ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::updateHosts( + const std::string& name, uint32_t priority, + PrioritySet::UpdateHostsParams&& update_hosts_params, + LocalityWeightsConstSharedPtr locality_weights, const HostVector& hosts_added, + const HostVector& hosts_removed, absl::optional overprovisioning_factor, + HostMapConstSharedPtr cross_priority_host_map) { + ENVOY_LOG(debug, "membership update for TLS cluster {} added {} removed {}", name, + hosts_added.size(), hosts_removed.size()); + priority_set_.updateHosts(priority, std::move(update_hosts_params), std::move(locality_weights), + hosts_added, hosts_removed, overprovisioning_factor, + std::move(cross_priority_host_map)); + // If an LB is thread aware, create a new worker local LB on membership changes. 
+ if (lb_factory_ != nullptr) { + ENVOY_LOG(debug, "re-creating local LB for TLS cluster {}", name); + lb_ = lb_factory_->create(); + } +} + +void ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::drainConnPools( + const HostVector& hosts_removed) { + parent_.drainConnPools(hosts_removed); +} + ClusterUpdateCallbacksHandlePtr ClusterManagerImpl::addThreadLocalClusterUpdateCallbacks(ClusterUpdateCallbacks& cb) { ThreadLocalClusterManagerImpl& cluster_manager = *tls_; @@ -1059,13 +1106,13 @@ ClusterManagerImpl::dumpClusterConfigs(const Matchers::StringMatcher& name_match } if (!cluster.added_via_api_) { auto& static_cluster = *config_dump->mutable_static_clusters()->Add(); - static_cluster.mutable_cluster()->PackFrom(API_RECOVER_ORIGINAL(cluster.cluster_config_)); + static_cluster.mutable_cluster()->PackFrom(cluster.cluster_config_); TimestampUtil::systemClockToTimestamp(cluster.last_updated_, *(static_cluster.mutable_last_updated())); } else { auto& dynamic_cluster = *config_dump->mutable_dynamic_active_clusters()->Add(); dynamic_cluster.set_version_info(cluster.version_info_); - dynamic_cluster.mutable_cluster()->PackFrom(API_RECOVER_ORIGINAL(cluster.cluster_config_)); + dynamic_cluster.mutable_cluster()->PackFrom(cluster.cluster_config_); TimestampUtil::systemClockToTimestamp(cluster.last_updated_, *(dynamic_cluster.mutable_last_updated())); } @@ -1078,7 +1125,7 @@ ClusterManagerImpl::dumpClusterConfigs(const Matchers::StringMatcher& name_match } auto& dynamic_cluster = *config_dump->mutable_dynamic_warming_clusters()->Add(); dynamic_cluster.set_version_info(cluster.version_info_); - dynamic_cluster.mutable_cluster()->PackFrom(API_RECOVER_ORIGINAL(cluster.cluster_config_)); + dynamic_cluster.mutable_cluster()->PackFrom(cluster.cluster_config_); TimestampUtil::systemClockToTimestamp(cluster.last_updated_, *(dynamic_cluster.mutable_last_updated())); } @@ -1096,7 +1143,7 @@ 
ClusterManagerImpl::ThreadLocalClusterManagerImpl::ThreadLocalClusterManagerImpl ENVOY_LOG(debug, "adding TLS local cluster {}", local_cluster_name); thread_local_clusters_[local_cluster_name] = std::make_unique( *this, local_cluster_params->info_, local_cluster_params->load_balancer_factory_); - local_priority_set_ = &thread_local_clusters_[local_cluster_name]->priority_set_; + local_priority_set_ = &thread_local_clusters_[local_cluster_name]->prioritySet(); } } @@ -1111,7 +1158,7 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::~ThreadLocalClusterManagerImp host_tcp_conn_pool_map_.clear(); ASSERT(host_tcp_conn_map_.empty()); for (auto& cluster : thread_local_clusters_) { - if (&cluster.second->priority_set_ != local_priority_set_) { + if (&cluster.second->prioritySet() != local_priority_set_) { cluster.second.reset(); } } @@ -1197,74 +1244,31 @@ void ClusterManagerImpl::ThreadLocalClusterManagerImpl::removeHosts( // We need to go through and purge any connection pools for hosts that got deleted. // Even if two hosts actually point to the same address this will be safe, since if a // host is readded it will be a different physical HostSharedPtr. 
- cluster_entry->parent_.drainConnPools(hosts_removed); + cluster_entry->drainConnPools(hosts_removed); } void ClusterManagerImpl::ThreadLocalClusterManagerImpl::updateClusterMembership( const std::string& name, uint32_t priority, PrioritySet::UpdateHostsParams update_hosts_params, LocalityWeightsConstSharedPtr locality_weights, const HostVector& hosts_added, - const HostVector& hosts_removed, uint64_t overprovisioning_factor) { + const HostVector& hosts_removed, uint64_t overprovisioning_factor, + HostMapConstSharedPtr cross_priority_host_map) { ASSERT(thread_local_clusters_.find(name) != thread_local_clusters_.end()); const auto& cluster_entry = thread_local_clusters_[name]; - ENVOY_LOG(debug, "membership update for TLS cluster {} added {} removed {}", name, - hosts_added.size(), hosts_removed.size()); - cluster_entry->priority_set_.updateHosts(priority, std::move(update_hosts_params), - std::move(locality_weights), hosts_added, hosts_removed, - overprovisioning_factor); - - // If an LB is thread aware, create a new worker local LB on membership changes. - if (cluster_entry->lb_factory_ != nullptr) { - ENVOY_LOG(debug, "re-creating local LB for TLS cluster {}", name); - cluster_entry->lb_ = cluster_entry->lb_factory_->create(); - } + cluster_entry->updateHosts(name, priority, std::move(update_hosts_params), + std::move(locality_weights), hosts_added, hosts_removed, + overprovisioning_factor, std::move(cross_priority_host_map)); } void ClusterManagerImpl::ThreadLocalClusterManagerImpl::onHostHealthFailure( const HostSharedPtr& host) { - // Drain all HTTP connection pool connections in the case of a host health failure. If outlier/ - // health is due to `ECMP` flow hashing issues for example, a new set of connections might do - // better. + // Drain all HTTP and TCP connection pool connections in the case of a host health failure. If + // outlier/ health is due to `ECMP` flow hashing issues for example, a new set of connections + // might do better. 
// TODO(mattklein123): This function is currently very specific, but in the future when we do // more granular host set changes, we should be able to capture single host changes and make them // more targeted. - { - const auto container = getHttpConnPoolsContainer(host); - if (container != nullptr) { - container->do_not_delete_ = true; - container->pools_->drainConnections(); - container->do_not_delete_ = false; - - if (container->pools_->size() == 0) { - host_http_conn_pool_map_.erase(host); - } - } - } - { - // Drain or close any TCP connection pool for the host. Draining a TCP pool doesn't lead to - // connections being closed, it only prevents new connections through the pool. The - // CLOSE_CONNECTIONS_ON_HOST_HEALTH_FAILURE can be used to make the pool close any - // active connections. - const auto& container = host_tcp_conn_pool_map_.find(host); - if (container != host_tcp_conn_pool_map_.end()) { - // Draining pools or closing connections can cause pool deletion if it becomes - // idle. Copy `pools_` so that we aren't iterating through a container that - // gets mutated by callbacks deleting from it. 
- std::vector pools; - for (const auto& pair : container->second.pools_) { - pools.push_back(pair.second.get()); - } - - for (auto* pool : pools) { - if (host->cluster().features() & - ClusterInfo::Features::CLOSE_CONNECTIONS_ON_HOST_HEALTH_FAILURE) { - pool->closeConnections(); - } else { - pool->drainConnections(); - } - } - } - } + drainAllConnPoolsWorker(host); if (host->cluster().features() & ClusterInfo::Features::CLOSE_CONNECTIONS_ON_HOST_HEALTH_FAILURE) { @@ -1350,6 +1354,7 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::ClusterEntry( break; } case LoadBalancerType::ClusterProvided: + case LoadBalancerType::LoadBalancingPolicyConfig: case LoadBalancerType::RingHash: case LoadBalancerType::Maglev: case LoadBalancerType::OriginalDst: { @@ -1361,6 +1366,64 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::ClusterEntry( } } +void ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::drainConnPools() { + for (auto& host_set : priority_set_.hostSetsPerPriority()) { + parent_.drainConnPools(host_set->hosts()); + } +} + +void ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::drainAllConnPools() { + for (auto& host_set : priority_set_.hostSetsPerPriority()) { + for (const HostSharedPtr& host : host_set->hosts()) { + parent_.drainAllConnPoolsWorker(host); + } + } +} + +void ClusterManagerImpl::ThreadLocalClusterManagerImpl::drainAllConnPoolsWorker( + const HostSharedPtr& host) { + // Drain or close any HTTP connection pool for the host. Draining an HTTP pool only leads to + // idle connections being closed. Non-idle connections are marked as draining and prevents new + // streams to go through them, causing new connections to be opened. 
+ { + const auto container = getHttpConnPoolsContainer(host); + if (container != nullptr) { + container->do_not_delete_ = true; + container->pools_->drainConnections(); + container->do_not_delete_ = false; + + if (container->pools_->size() == 0) { + host_http_conn_pool_map_.erase(host); + } + } + } + { + // Drain or close any TCP connection pool for the host. Draining a TCP pool doesn't lead to + // connections being closed, it only prevents new connections through the pool. The + // CLOSE_CONNECTIONS_ON_HOST_HEALTH_FAILURE can be used to make the pool close any + // active connections. + const auto& container = host_tcp_conn_pool_map_.find(host); + if (container != host_tcp_conn_pool_map_.end()) { + // Draining pools or closing connections can cause pool deletion if it becomes + // idle. Copy `pools_` so that we aren't iterating through a container that + // gets mutated by callbacks deleting from it. + std::vector pools; + for (const auto& pair : container->second.pools_) { + pools.push_back(pair.second.get()); + } + + for (auto* pool : pools) { + if (host->cluster().features() & + ClusterInfo::Features::CLOSE_CONNECTIONS_ON_HOST_HEALTH_FAILURE) { + pool->closeConnections(); + } else { + pool->drainConnections(); + } + } + } + } +} + ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::~ClusterEntry() { // We need to drain all connection pools for the cluster being removed. Then we can remove the // cluster. @@ -1368,13 +1431,11 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::~ClusterEntry() // TODO(mattklein123): Optimally, we would just fire member changed callbacks and remove all of // the hosts inside of the HostImpl destructor. That is a change with wide implications, so we are // going with a more targeted approach for now. 
- for (auto& host_set : priority_set_.hostSetsPerPriority()) { - parent_.drainConnPools(host_set->hosts()); - } + drainConnPools(); } Http::ConnectionPool::Instance* -ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::connPool( +ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::httpConnPoolImpl( ResourcePriority priority, absl::optional downstream_protocol, LoadBalancerContext* context, bool peek) { HostConstSharedPtr host = (peek ? lb_->peekAnotherHost(context) : lb_->chooseHost(context)); @@ -1439,8 +1500,9 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::connPool( have_transport_socket_options ? context->upstreamTransportSocketOptions() : nullptr, parent_.parent_.time_source_, parent_.cluster_manager_state_); - pool->addIdleCallback( - [this, host, priority, hash_key]() { httpConnPoolIsIdle(host, priority, hash_key); }); + pool->addIdleCallback([&parent = parent_, host, priority, hash_key]() { + parent.httpConnPoolIsIdle(host, priority, hash_key); + }); return pool; }); @@ -1452,15 +1514,15 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::connPool( } } -void ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::httpConnPoolIsIdle( +void ClusterManagerImpl::ThreadLocalClusterManagerImpl::httpConnPoolIsIdle( HostConstSharedPtr host, ResourcePriority priority, const std::vector& hash_key) { - if (parent_.destroying_) { + if (destroying_) { // If the Cluster is being destroyed, this pool will be cleaned up by that // process. return; } - ConnPoolsContainer* container = parent_.getHttpConnPoolsContainer(host); + ConnPoolsContainer* container = getHttpConnPoolsContainer(host); if (container == nullptr) { // This could happen if we have cleaned out the host before iterating through every // connection pool. Handle it by just continuing. 
@@ -1478,14 +1540,14 @@ void ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::httpConnPo // comment in `ClusterManagerImpl::ThreadLocalClusterManagerImpl::drainConnPools`. if (!container->do_not_delete_ && container->pools_->size() == 0) { ENVOY_LOG(trace, "Pool container empty for host {}, erasing host entry", host); - parent_.host_http_conn_pool_map_.erase( + host_http_conn_pool_map_.erase( host); // NOTE: `container` is erased after this point in the lambda. } } } Tcp::ConnectionPool::Instance* -ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::tcpConnPool( +ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::tcpConnPoolImpl( ResourcePriority priority, LoadBalancerContext* context, bool peek) { HostConstSharedPtr host = (peek ? lb_->peekAnotherHost(context) : lb_->chooseHost(context)); if (!host) { @@ -1533,21 +1595,21 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::tcpConnPool( parent_.cluster_manager_state_)); ASSERT(inserted); pool_iter->second->addIdleCallback( - [this, host, hash_key]() { tcpConnPoolIsIdle(host, hash_key); }); + [&parent = parent_, host, hash_key]() { parent.tcpConnPoolIsIdle(host, hash_key); }); } return pool_iter->second.get(); } -void ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::tcpConnPoolIsIdle( +void ClusterManagerImpl::ThreadLocalClusterManagerImpl::tcpConnPoolIsIdle( HostConstSharedPtr host, const std::vector& hash_key) { - if (parent_.destroying_) { + if (destroying_) { // If the Cluster is being destroyed, this pool will be cleaned up by that process. 
return; } - auto it = parent_.host_tcp_conn_pool_map_.find(host); - if (it != parent_.host_tcp_conn_pool_map_.end()) { + auto it = host_tcp_conn_pool_map_.find(host); + if (it != host_tcp_conn_pool_map_.end()) { TcpConnPoolsContainer& container = it->second; auto erase_iter = container.pools_.find(hash_key); @@ -1555,13 +1617,13 @@ void ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::tcpConnPoo if (container.draining_ || Runtime::runtimeFeatureEnabled("envoy.reloadable_features.conn_pool_delete_when_idle")) { ENVOY_LOG(trace, "Idle pool, erasing pool for host {}", host); - parent_.thread_local_dispatcher_.deferredDelete(std::move(erase_iter->second)); + thread_local_dispatcher_.deferredDelete(std::move(erase_iter->second)); container.pools_.erase(erase_iter); } } if (container.pools_.empty()) { - parent_.host_tcp_conn_pool_map_.erase( + host_tcp_conn_pool_map_.erase( host); // NOTE: `container` is erased after this point in the lambda. } } diff --git a/source/common/upstream/cluster_manager_impl.h b/source/common/upstream/cluster_manager_impl.h index 8f66d055f49e5..1540defd97712 100644 --- a/source/common/upstream/cluster_manager_impl.h +++ b/source/common/upstream/cluster_manager_impl.h @@ -315,9 +315,12 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggable>; - struct ClusterEntry : public ThreadLocalCluster { + class ClusterEntry : public ThreadLocalCluster { + public: ClusterEntry(ThreadLocalClusterManagerImpl& parent, ClusterInfoConstSharedPtr cluster, const LoadBalancerFactorySharedPtr& lb_factory); ~ClusterEntry() override; - Http::ConnectionPool::Instance* connPool(ResourcePriority priority, - absl::optional downstream_protocol, - LoadBalancerContext* context, bool peek); - - Tcp::ConnectionPool::Instance* tcpConnPool(ResourcePriority priority, - LoadBalancerContext* context, bool peek); - - void httpConnPoolIsIdle(HostConstSharedPtr host, ResourcePriority priority, - const std::vector& hash_key); - void 
tcpConnPoolIsIdle(HostConstSharedPtr host, const std::vector& hash_key); - // Upstream::ThreadLocalCluster const PrioritySet& prioritySet() override { return priority_set_; } ClusterInfoConstSharedPtr info() override { return cluster_info_; } @@ -428,6 +421,30 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggable overprovisioning_factor, + HostMapConstSharedPtr cross_priority_host_map); + + // Drains any connection pools associated with the removed hosts. + void drainConnPools(const HostVector& hosts_removed); + // Drains idle clients in connection pools for all hosts. + void drainConnPools(); + // Drain all clients in connection pools for all hosts. + void drainAllConnPools(); + + private: + Http::ConnectionPool::Instance* + httpConnPoolImpl(ResourcePriority priority, + absl::optional downstream_protocol, + LoadBalancerContext* context, bool peek); + + Tcp::ConnectionPool::Instance* tcpConnPoolImpl(ResourcePriority priority, + LoadBalancerContext* context, bool peek); + ThreadLocalClusterManagerImpl& parent_; PrioritySetImpl priority_set_; // LB factory if applicable. Not all load balancer types have a factory. LB types that have @@ -450,16 +467,26 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggable& local_cluster_params); ~ThreadLocalClusterManagerImpl() override; + // TODO(junr03): clean up drainConnPools vs drainAllConnPools once ConnPoolImplBase::startDrain + // and + // ConnPoolImplBase::drainConnections() get cleaned up. The code in onHostHealthFailure and the + // code in ThreadLocalClusterManagerImpl::drainConnPools(const HostVector& hosts) is very + // similar and can be merged in a similar fashion to the ConnPoolImplBase case. 
void drainConnPools(const HostVector& hosts); void drainConnPools(HostSharedPtr old_host, ConnPoolsContainer& container); void drainTcpConnPools(TcpConnPoolsContainer& container); + void drainAllConnPoolsWorker(const HostSharedPtr& host); + void httpConnPoolIsIdle(HostConstSharedPtr host, ResourcePriority priority, + const std::vector& hash_key); + void tcpConnPoolIsIdle(HostConstSharedPtr host, const std::vector& hash_key); void removeTcpConn(const HostConstSharedPtr& host, Network::ClientConnection& connection); void removeHosts(const std::string& name, const HostVector& hosts_removed); void updateClusterMembership(const std::string& name, uint32_t priority, PrioritySet::UpdateHostsParams update_hosts_params, LocalityWeightsConstSharedPtr locality_weights, const HostVector& hosts_added, const HostVector& hosts_removed, - uint64_t overprovisioning_factor); + uint64_t overprovisioning_factor, + HostMapConstSharedPtr cross_priority_host_map); void onHostHealthFailure(const HostSharedPtr& host); ConnPoolsContainer* getHttpConnPoolsContainer(const HostConstSharedPtr& host, diff --git a/source/common/upstream/edf_scheduler.h b/source/common/upstream/edf_scheduler.h index eaac9f08857a0..e667c8808e70a 100644 --- a/source/common/upstream/edf_scheduler.h +++ b/source/common/upstream/edf_scheduler.h @@ -3,6 +3,8 @@ #include #include +#include "envoy/upstream/scheduler.h" + #include "source/common/common/assert.h" namespace Envoy { @@ -23,16 +25,10 @@ namespace Upstream { // Each pick from the schedule has the earliest deadline entry selected. Entries have deadlines set // at current time + 1 / weight, providing weighted round robin behavior with floating point // weights and an O(log n) pick time. 
-template class EdfScheduler { +template class EdfScheduler : public Scheduler { public: - // Each time peekAgain is called, it will return the best-effort subsequent - // pick, popping and reinserting the entry as if it had been picked, and - // inserting it into the pre-picked queue. - // The first time peekAgain is called, it will return the - // first item which will be picked, the second time it is called it will - // return the second item which will be picked. As picks occur, that window - // will shrink. - std::shared_ptr peekAgain(std::function calculate_weight) { + // See scheduler.h for an explanation of each public method. + std::shared_ptr peekAgain(std::function calculate_weight) override { if (hasEntry()) { prepick_list_.push_back(std::move(queue_.top().entry_)); std::shared_ptr ret{prepick_list_.back()}; @@ -43,12 +39,7 @@ template class EdfScheduler { return nullptr; } - /** - * Pick queue entry with closest deadline and adds it back using the weight - * from calculate_weight. - * @return std::shared_ptr to next valid the queue entry if or nullptr if none exists. - */ - std::shared_ptr pickAndAdd(std::function calculate_weight) { + std::shared_ptr pickAndAdd(std::function calculate_weight) override { while (!prepick_list_.empty()) { // In this case the entry was added back during peekAgain so don't re-add. if (prepick_list_.front().expired()) { @@ -68,12 +59,7 @@ template class EdfScheduler { return nullptr; } - /** - * Insert entry into queue with a given weight. The deadline will be current_time_ + 1 / weight. - * @param weight floating point weight. - * @param entry shared pointer to entry, only a weak reference will be retained. 
- */ - void add(double weight, std::shared_ptr entry) { + void add(double weight, std::shared_ptr entry) override { ASSERT(weight > 0); const double deadline = current_time_ + 1.0 / weight; EDF_TRACE("Insertion {} in queue with deadline {} and weight {}.", @@ -82,11 +68,7 @@ template class EdfScheduler { ASSERT(queue_.top().deadline_ >= current_time_); } - /** - * Implements empty() on the internal queue. Does not attempt to discard expired elements. - * @return bool whether or not the internal queue is empty. - */ - bool empty() const { return queue_.empty(); } + bool empty() const override { return queue_.empty(); } private: /** diff --git a/source/common/upstream/eds.cc b/source/common/upstream/eds.cc index c8ea0bb333487..b25bdc07e5824 100644 --- a/source/common/upstream/eds.cc +++ b/source/common/upstream/eds.cc @@ -1,6 +1,5 @@ #include "source/common/upstream/eds.h" -#include "envoy/api/v2/endpoint.pb.h" #include "envoy/common/exception.h" #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/config/core/v3/config_source.pb.h" @@ -10,7 +9,6 @@ #include "source/common/common/utility.h" #include "source/common/config/api_version.h" #include "source/common/config/decoded_resource_impl.h" -#include "source/common/config/version_converter.h" namespace Envoy { namespace Upstream { @@ -22,7 +20,6 @@ EdsClusterImpl::EdsClusterImpl( : BaseDynamicClusterImpl(cluster, runtime, factory_context, std::move(stats_scope), added_via_api, factory_context.dispatcher().timeSource()), Envoy::Config::SubscriptionBase( - cluster.eds_cluster_config().eds_config().resource_api_version(), factory_context.messageValidationVisitor(), "cluster_name"), local_info_(factory_context.localInfo()), cluster_name_(cluster.eds_cluster_config().service_name().empty() @@ -47,7 +44,6 @@ EdsClusterImpl::EdsClusterImpl( void EdsClusterImpl::startPreInit() { subscription_->start({cluster_name_}); } void EdsClusterImpl::BatchUpdateHelper::batchUpdate(PrioritySet::HostUpdateCb& host_update_cb) { 
- absl::flat_hash_map updated_hosts; absl::flat_hash_set all_new_hosts; PriorityStateManager priority_state_manager(parent_, parent_.local_info_, &host_update_cb); for (const auto& locality_lb_endpoint : cluster_load_assignment_.endpoints()) { @@ -57,6 +53,11 @@ void EdsClusterImpl::BatchUpdateHelper::batchUpdate(PrioritySet::HostUpdateCb& h for (const auto& lb_endpoint : locality_lb_endpoint.lb_endpoints()) { auto address = parent_.resolveProtoAddress(lb_endpoint.endpoint().address()); + // When the configuration contains duplicate hosts, only the first one will be retained. + if (all_new_hosts.count(address->asString()) > 0) { + continue; + } + priority_state_manager.registerHostForPriority(lb_endpoint.endpoint().hostname(), address, locality_lb_endpoint, lb_endpoint, parent_.time_source_); @@ -67,6 +68,11 @@ void EdsClusterImpl::BatchUpdateHelper::batchUpdate(PrioritySet::HostUpdateCb& h // Track whether we rebuilt any LB structures. bool cluster_rebuilt = false; + // Get the map of all the latest existing hosts, which is used to filter out the existing + // hosts in the process of updating cluster memberships. + HostMapConstSharedPtr all_hosts = parent_.prioritySet().crossPriorityHostMap(); + ASSERT(all_hosts != nullptr); + const uint32_t overprovisioning_factor = PROTOBUF_GET_WRAPPED_OR_DEFAULT( cluster_load_assignment_.policy(), overprovisioning_factor, kDefaultOverProvisioningFactor); @@ -81,13 +87,13 @@ void EdsClusterImpl::BatchUpdateHelper::batchUpdate(PrioritySet::HostUpdateCb& h if (priority_state[i].first != nullptr) { cluster_rebuilt |= parent_.updateHostsPerLocality( i, overprovisioning_factor, *priority_state[i].first, parent_.locality_weights_map_[i], - priority_state[i].second, priority_state_manager, updated_hosts, all_new_hosts); + priority_state[i].second, priority_state_manager, *all_hosts, all_new_hosts); } else { // If the new update contains a priority with no hosts, call the update function with an empty // set of hosts. 
cluster_rebuilt |= parent_.updateHostsPerLocality( i, overprovisioning_factor, {}, parent_.locality_weights_map_[i], empty_locality_map, - priority_state_manager, updated_hosts, all_new_hosts); + priority_state_manager, *all_hosts, all_new_hosts); } } @@ -100,11 +106,9 @@ void EdsClusterImpl::BatchUpdateHelper::batchUpdate(PrioritySet::HostUpdateCb& h } cluster_rebuilt |= parent_.updateHostsPerLocality( i, overprovisioning_factor, {}, parent_.locality_weights_map_[i], empty_locality_map, - priority_state_manager, updated_hosts, all_new_hosts); + priority_state_manager, *all_hosts, all_new_hosts); } - parent_.all_hosts_ = std::move(updated_hosts); - if (!cluster_rebuilt) { parent_.info_->stats().update_no_rebuild_.inc(); } @@ -126,9 +130,6 @@ void EdsClusterImpl::onConfigUpdate(const std::vectorenabled()) { @@ -227,18 +228,12 @@ void EdsClusterImpl::reloadHealthyHostsHelper(const HostSharedPtr& host) { HostSetImpl::partitionHosts(hosts_copy, hosts_per_locality_copy), host_set->localityWeights(), {}, hosts_to_remove, absl::nullopt); } - - if (host_to_exclude != nullptr) { - ASSERT(all_hosts_.find(host_to_exclude->address()->asString()) != all_hosts_.end()); - all_hosts_.erase(host_to_exclude->address()->asString()); - } } bool EdsClusterImpl::updateHostsPerLocality( const uint32_t priority, const uint32_t overprovisioning_factor, const HostVector& new_hosts, LocalityWeightsMap& locality_weights_map, LocalityWeightsMap& new_locality_weights_map, - PriorityStateManager& priority_state_manager, - absl::flat_hash_map& updated_hosts, + PriorityStateManager& priority_state_manager, const HostMap& all_hosts, const absl::flat_hash_set& all_new_hosts) { const auto& host_set = priority_set_.getOrCreateHostSet(priority, overprovisioning_factor); HostVectorSharedPtr current_hosts_copy(new HostVector(host_set.hosts())); @@ -255,9 +250,8 @@ bool EdsClusterImpl::updateHostsPerLocality( // performance implications, since this has the knock on effect that we rebuild the load 
balancers // and locality scheduler. See the comment in BaseDynamicClusterImpl::updateDynamicHostList // about this. In the future we may need to do better here. - const bool hosts_updated = - updateDynamicHostList(new_hosts, *current_hosts_copy, hosts_added, hosts_removed, - updated_hosts, all_hosts_, all_new_hosts); + const bool hosts_updated = updateDynamicHostList(new_hosts, *current_hosts_copy, hosts_added, + hosts_removed, all_hosts, all_new_hosts); if (hosts_updated || host_set.overprovisioningFactor() != overprovisioning_factor || locality_weights_map != new_locality_weights_map) { ASSERT(std::all_of(current_hosts_copy->begin(), current_hosts_copy->end(), diff --git a/source/common/upstream/eds.h b/source/common/upstream/eds.h index 45469661df798..f396b5e1785f7 100644 --- a/source/common/upstream/eds.h +++ b/source/common/upstream/eds.h @@ -50,7 +50,8 @@ class EdsClusterImpl bool updateHostsPerLocality(const uint32_t priority, const uint32_t overprovisioning_factor, const HostVector& new_hosts, LocalityWeightsMap& locality_weights_map, LocalityWeightsMap& new_locality_weights_map, - PriorityStateManager& priority_state_manager, HostMap& updated_hosts, + PriorityStateManager& priority_state_manager, + const HostMap& all_hosts, const absl::flat_hash_set& all_new_hosts); bool validateUpdateSize(int num_resources); @@ -78,7 +79,6 @@ class EdsClusterImpl const LocalInfo::LocalInfo& local_info_; const std::string cluster_name_; std::vector locality_weights_map_; - HostMap all_hosts_; Event::TimerPtr assignment_timeout_; InitializePhase initialize_phase_; }; diff --git a/source/common/upstream/health_checker_base_impl.cc b/source/common/upstream/health_checker_base_impl.cc index b82058cf2aadd..e93505b4d619c 100644 --- a/source/common/upstream/health_checker_base_impl.cc +++ b/source/common/upstream/health_checker_base_impl.cc @@ -72,6 +72,10 @@ MetadataConstSharedPtr HealthCheckerImplBase::initTransportSocketMatchMetadata( } 
HealthCheckerImplBase::~HealthCheckerImplBase() { + // First clear callbacks that otherwise will be run from + // ActiveHealthCheckSession::onDeferredDeleteBase(). This prevents invoking a callback on a + // deleted parent object (e.g. Cluster). + callbacks_.clear(); // ASSERTs inside the session destructor check to make sure we have been previously deferred // deleted. Unify that logic here before actual destruction happens. for (auto& session : active_sessions_) { @@ -258,6 +262,11 @@ void HealthCheckerImplBase::ActiveHealthCheckSession::onDeferredDeleteBase() { parent_.decDegraded(); } onDeferredDelete(); + + // Run callbacks in case something is waiting for health checks to run which will now never run. + if (first_check_) { + parent_.runCallbacks(host_, HealthTransition::Unchanged); + } } void HealthCheckerImplBase::ActiveHealthCheckSession::handleSuccess(bool degraded) { diff --git a/source/common/upstream/health_checker_impl.cc b/source/common/upstream/health_checker_impl.cc index 2859e58767584..2e7a1689b5191 100644 --- a/source/common/upstream/health_checker_impl.cc +++ b/source/common/upstream/health_checker_impl.cc @@ -139,19 +139,9 @@ HttpHealthCheckerImpl::HttpHealthCheckerImpl(const Cluster& cluster, config.http_health_check().request_headers_to_remove())), http_status_checker_(config.http_health_check().expected_statuses(), static_cast(Http::Code::OK)), - codec_client_type_( - codecClientType(config.http_health_check().hidden_envoy_deprecated_use_http2() - ? envoy::type::v3::HTTP2 - : config.http_health_check().codec_client_type())), + codec_client_type_(codecClientType(config.http_health_check().codec_client_type())), random_generator_(random) { - // The deprecated service_name field was previously being used to compare with the health checked - // cluster name using a StartsWith comparison. Since StartsWith is essentially a prefix - // comparison, representing the intent by using a StringMatcher prefix is a more natural way. 
- if (!config.http_health_check().hidden_envoy_deprecated_service_name().empty()) { - envoy::type::matcher::v3::StringMatcher matcher; - matcher.set_prefix(config.http_health_check().hidden_envoy_deprecated_service_name()); - service_name_matcher_.emplace(matcher); - } else if (config.http_health_check().has_service_name_matcher()) { + if (config.http_health_check().has_service_name_matcher()) { service_name_matcher_.emplace(config.http_health_check().service_name_matcher()); } } @@ -215,7 +205,7 @@ HttpHealthCheckerImpl::HttpActiveHealthCheckSession::HttpActiveHealthCheckSessio : ActiveHealthCheckSession(parent, host), parent_(parent), hostname_(getHostname(host, parent_.host_value_, parent_.cluster_.info())), protocol_(codecClientTypeToProtocol(parent_.codec_client_type_)), - local_address_provider_(std::make_shared( + local_connection_info_provider_(std::make_shared( Network::Utility::getCanonicalIpv4LoopbackAddress(), Network::Utility::getCanonicalIpv4LoopbackAddress())) {} @@ -280,7 +270,7 @@ void HttpHealthCheckerImpl::HttpActiveHealthCheckSession::onInterval() { host_->transportSocketFactory().implementsSecureTransport(), host_->transportSocketFactory().implementsSecureTransport()); StreamInfo::StreamInfoImpl stream_info(protocol_, parent_.dispatcher_.timeSource(), - local_address_provider_); + local_connection_info_provider_); stream_info.onUpstreamHostSelected(host_); parent_.request_headers_parser_->evaluateHeaders(*request_headers, stream_info); auto status = request_encoder->encodeHeaders(*request_headers, true); diff --git a/source/common/upstream/health_checker_impl.h b/source/common/upstream/health_checker_impl.h index ec3b615ea5abe..35a564f6118b9 100644 --- a/source/common/upstream/health_checker_impl.h +++ b/source/common/upstream/health_checker_impl.h @@ -141,7 +141,7 @@ class HttpHealthCheckerImpl : public HealthCheckerImplBase { Http::ResponseHeaderMapPtr response_headers_; const std::string& hostname_; const Http::Protocol protocol_; - 
Network::SocketAddressProviderSharedPtr local_address_provider_; + Network::ConnectionInfoProviderSharedPtr local_connection_info_provider_; bool expect_reset_{}; bool reuse_connection_ = false; bool request_in_flight_ = false; @@ -163,7 +163,8 @@ class HttpHealthCheckerImpl : public HealthCheckerImplBase { const std::string path_; const std::string host_value_; - absl::optional service_name_matcher_; + absl::optional> + service_name_matcher_; Router::HeaderParserPtr request_headers_parser_; const HttpStatusChecker http_status_checker_; diff --git a/source/common/upstream/health_discovery_service.cc b/source/common/upstream/health_discovery_service.cc index 96f3c07580ac5..9fad649f3e151 100644 --- a/source/common/upstream/health_discovery_service.cc +++ b/source/common/upstream/health_discovery_service.cc @@ -9,7 +9,6 @@ #include "envoy/service/health/v3/hds.pb.validate.h" #include "envoy/stats/scope.h" -#include "source/common/config/version_converter.h" #include "source/common/protobuf/message_validator_impl.h" #include "source/common/protobuf/protobuf.h" #include "source/common/protobuf/utility.h" @@ -29,7 +28,6 @@ static constexpr uint32_t RetryInitialDelayMilliseconds = 1000; static constexpr uint32_t RetryMaxDelayMilliseconds = 30000; HdsDelegate::HdsDelegate(Stats::Scope& scope, Grpc::RawAsyncClientPtr async_client, - envoy::config::core::v3::ApiVersion transport_api_version, Event::Dispatcher& dispatcher, Runtime::Loader& runtime, Envoy::Stats::Store& stats, Ssl::ContextManager& ssl_context_manager, ClusterInfoFactory& info_factory, @@ -39,13 +37,10 @@ HdsDelegate::HdsDelegate(Stats::Scope& scope, Grpc::RawAsyncClientPtr async_clie ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api, const Server::Options& options) : stats_{ALL_HDS_STATS(POOL_COUNTER_PREFIX(scope, "hds_delegate."))}, - service_method_(Grpc::VersionedMethods( - "envoy.service.health.v3.HealthDiscoveryService.StreamHealthCheck", - 
"envoy.service.discovery.v2.HealthDiscoveryService.StreamHealthCheck") - .getMethodDescriptorForVersion(transport_api_version)), - async_client_(std::move(async_client)), transport_api_version_(transport_api_version), - dispatcher_(dispatcher), runtime_(runtime), store_stats_(stats), - ssl_context_manager_(ssl_context_manager), info_factory_(info_factory), + service_method_(*Protobuf::DescriptorPool::generated_pool()->FindMethodByName( + "envoy.service.health.v3.HealthDiscoveryService.StreamHealthCheck")), + async_client_(std::move(async_client)), dispatcher_(dispatcher), runtime_(runtime), + store_stats_(stats), ssl_context_manager_(ssl_context_manager), info_factory_(info_factory), access_log_manager_(access_log_manager), cm_(cm), local_info_(local_info), admin_(admin), singleton_manager_(singleton_manager), tls_(tls), specifier_hash_(0), validation_visitor_(validation_visitor), api_(api), options_(options) { @@ -87,8 +82,6 @@ void HdsDelegate::establishNewStream() { return; } - Config::VersionConverter::prepareMessageForGrpcWire(health_check_request_, - transport_api_version_); ENVOY_LOG(debug, "Sending HealthCheckRequest {} ", health_check_request_.DebugString()); stream_->sendMessage(health_check_request_, false); stats_.responses_.inc(); diff --git a/source/common/upstream/health_discovery_service.h b/source/common/upstream/health_discovery_service.h index dd9b37722ee4f..342e0e56ca27e 100644 --- a/source/common/upstream/health_discovery_service.h +++ b/source/common/upstream/health_discovery_service.h @@ -148,7 +148,6 @@ class HdsDelegate : Grpc::AsyncStreamCallbacks { public: HdsDelegate(Stats::Scope& scope, Grpc::RawAsyncClientPtr async_client, - envoy::config::core::v3::ApiVersion transport_api_version, Event::Dispatcher& dispatcher, Runtime::Loader& runtime, Envoy::Stats::Store& stats, Ssl::ContextManager& ssl_context_manager, ClusterInfoFactory& info_factory, AccessLog::AccessLogManager& access_log_manager, ClusterManager& cm, @@ -188,7 +187,6 @@ class 
HdsDelegate : Grpc::AsyncStreamCallbacks async_client_; - const envoy::config::core::v3::ApiVersion transport_api_version_; Grpc::AsyncStream stream_{}; Event::Dispatcher& dispatcher_; diff --git a/source/common/upstream/leds.cc b/source/common/upstream/leds.cc new file mode 100644 index 0000000000000..ef23bd0cdeeb6 --- /dev/null +++ b/source/common/upstream/leds.cc @@ -0,0 +1,87 @@ +#include "source/common/upstream/leds.h" + +#include "envoy/common/exception.h" +#include "envoy/config/core/v3/config_source.pb.h" + +#include "source/common/common/assert.h" +#include "source/common/config/decoded_resource_impl.h" +#include "source/common/config/xds_resource.h" + +namespace Envoy { +namespace Upstream { + +LedsSubscription::LedsSubscription( + const envoy::config::endpoint::v3::LedsClusterLocalityConfig& leds_config, + const std::string& cluster_name, + Server::Configuration::TransportSocketFactoryContext& factory_context, + Stats::Scope& cluster_stats_scope, const UpdateCb& callback) + : Envoy::Config::SubscriptionBase( + factory_context.messageValidationVisitor(), leds_config.leds_collection_name()), + local_info_(factory_context.localInfo()), cluster_name_(cluster_name), + stats_scope_(cluster_stats_scope.createScope("leds.")), + stats_({ALL_LEDS_STATS(POOL_COUNTER(*stats_scope_))}), callback_(callback) { + const xds::core::v3::ResourceLocator leds_resource_locator = + Config::XdsResourceIdentifier::decodeUrl(leds_config.leds_collection_name()); + const auto resource_name = getResourceName(); + subscription_ = + factory_context.clusterManager().subscriptionFactory().collectionSubscriptionFromUrl( + leds_resource_locator, leds_config.leds_config(), resource_name, *stats_scope_, *this, + resource_decoder_); + subscription_->start({}); +} + +void LedsSubscription::onConfigUpdate( + const std::vector& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, const std::string&) { + // At least one resource must be added or removed. 
+ if (added_resources.empty() && removed_resources.empty()) { + ENVOY_LOG(debug, "No added or removed LbEndpoint entries for cluster {} in onConfigUpdate()", + cluster_name_); + stats_.update_empty_.inc(); + // If it's the first update, and it has no resources, set the locality as active, + // and update whoever is waiting for it, to allow the system to initialize. + if (!initial_update_attempt_complete_) { + initial_update_attempt_complete_ = true; + callback_(); + } + return; + } + + ENVOY_LOG(info, "{}: add {} endpoint(s), remove {} endpoints(s)", cluster_name_, + added_resources.size(), removed_resources.size()); + + // Update the internal host list with the removed hosts. + for (const auto& removed_resource_name : removed_resources) { + // Remove the entry from the endpoints list. + ENVOY_LOG(debug, "Removing endpoint {} using LEDS update.", removed_resource_name); + endpoints_map_.erase(removed_resource_name); + } + + // Update the internal host list with the added hosts. + for (const auto& added_resource : added_resources) { + const auto& added_resource_name = added_resource.get().name(); + ENVOY_LOG(trace, "Adding/Updating endpoint {} using LEDS update.", added_resource_name); + envoy::config::endpoint::v3::LbEndpoint lb_endpoint = + dynamic_cast( + added_resource.get().resource()); + endpoints_map_[added_resource_name] = std::move(lb_endpoint); + } + + // Notify the callbacks that the host list has been modified. + initial_update_attempt_complete_ = true; + callback_(); +} + +void LedsSubscription::onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason, + const EnvoyException*) { + ASSERT(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure != reason); + ENVOY_LOG(debug, "LEDS update failed"); + + // Similar to EDS, we need to let the system initialize. Set the locality as + // active, and update whoever is waiting for it. 
+ initial_update_attempt_complete_ = true; + callback_(); +} + +} // namespace Upstream +} // namespace Envoy diff --git a/source/common/upstream/leds.h b/source/common/upstream/leds.h new file mode 100644 index 0000000000000..872c44c789b97 --- /dev/null +++ b/source/common/upstream/leds.h @@ -0,0 +1,85 @@ +#pragma once + +#include + +#include "envoy/config/endpoint/v3/endpoint_components.pb.h" +#include "envoy/config/endpoint/v3/endpoint_components.pb.validate.h" +#include "envoy/config/subscription.h" +#include "envoy/local_info/local_info.h" +#include "envoy/stats/scope.h" +#include "envoy/stats/stats_macros.h" + +#include "source/common/config/subscription_base.h" +#include "source/common/upstream/upstream_impl.h" + +namespace Envoy { +namespace Upstream { + +/** + * All per-cluster LEDS stats. @see stats_macros.h + * These will be added to the subscription stats. + */ +#define ALL_LEDS_STATS(COUNTER) COUNTER(update_empty) + +/** + * Struct definition for all per-cluster LEDS stats. @see stats_macros.h + */ +struct LedsStats { + ALL_LEDS_STATS(GENERATE_COUNTER_STRUCT) +}; + +/* + * A single subscription for all LEDS resources of a specific SourceConfig that + * fetches updates from a Locality Endpoint Discovery Service. + * Multiple subscriptions with the same LEDS collection name can use a single + * subscription. + */ +class LedsSubscription + : private Envoy::Config::SubscriptionBase, + private Logger::Loggable { +public: + using UpdateCb = std::function; + using LbEndpointsMap = absl::flat_hash_map; + + LedsSubscription(const envoy::config::endpoint::v3::LedsClusterLocalityConfig& leds_config, + const std::string& cluster_name, + Server::Configuration::TransportSocketFactoryContext& factory_context, + Stats::Scope& stats_scope, const UpdateCb& callback); + + // Returns the map between registered LEDS resource names and their endpoints data. 
+ const LbEndpointsMap& getEndpointsMap() const { return endpoints_map_; } + + // Returns true iff the endpoints were updated. + bool isUpdated() const { return initial_update_attempt_complete_; } + +private: + // Config::SubscriptionCallbacks + void onConfigUpdate(const std::vector&, const std::string&) override { + // LEDS is not used in SotW mode. + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + } + void onConfigUpdate(const std::vector& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string&) override; + void onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason reason, + const EnvoyException* e) override; + + const LocalInfo::LocalInfo& local_info_; + const std::string cluster_name_; + // LEDS stats scope must outlive the subscription. + Stats::ScopePtr stats_scope_; + LedsStats stats_; + // A map between a LEDS resource name to the LbEndpoint data. + LbEndpointsMap endpoints_map_; + // A callback function activated after an update is received (either successful or + // unsuccessful). + const UpdateCb callback_; + // Once the endpoints of the locality are updated, it is considered active. + bool initial_update_attempt_complete_{false}; + Config::SubscriptionPtr subscription_; +}; + +using LedsSubscriptionPtr = std::unique_ptr; + +} // namespace Upstream +} // namespace Envoy diff --git a/source/common/upstream/load_balancer_factory_base.h b/source/common/upstream/load_balancer_factory_base.h new file mode 100644 index 0000000000000..ce3fa56eabac7 --- /dev/null +++ b/source/common/upstream/load_balancer_factory_base.h @@ -0,0 +1,29 @@ +#pragma once + +#include "envoy/upstream/load_balancer.h" + +namespace Envoy { +namespace Upstream { + +/** + * Base class for cluster provided load balancers and load balancers specified by load balancing + * policy config. This class should be extended directly if the load balancing policy specifies a + * thread-aware load balancer. 
+ * + * TODO: provide a ThreadLocalLoadBalancer construct to abstract away thread-awareness from load + * balancing extensions that don't require it. + */ +class TypedLoadBalancerFactoryBase : public TypedLoadBalancerFactory { +public: + // Upstream::TypedLoadBalancerFactory + std::string name() const override { return name_; } + +protected: + TypedLoadBalancerFactoryBase(const std::string& name) : name_(name) {} + +private: + const std::string name_; +}; + +} // namespace Upstream +} // namespace Envoy diff --git a/source/common/upstream/load_stats_reporter.cc b/source/common/upstream/load_stats_reporter.cc index 3b3f1bd06d08e..1121ecc6247c9 100644 --- a/source/common/upstream/load_stats_reporter.cc +++ b/source/common/upstream/load_stats_reporter.cc @@ -3,7 +3,6 @@ #include "envoy/service/load_stats/v3/lrs.pb.h" #include "envoy/stats/scope.h" -#include "source/common/config/version_converter.h" #include "source/common/protobuf/protobuf.h" namespace Envoy { @@ -12,15 +11,12 @@ namespace Upstream { LoadStatsReporter::LoadStatsReporter(const LocalInfo::LocalInfo& local_info, ClusterManager& cluster_manager, Stats::Scope& scope, Grpc::RawAsyncClientPtr async_client, - envoy::config::core::v3::ApiVersion transport_api_version, Event::Dispatcher& dispatcher) : cm_(cluster_manager), stats_{ALL_LOAD_REPORTER_STATS( POOL_COUNTER_PREFIX(scope, "load_reporter."))}, - async_client_(std::move(async_client)), transport_api_version_(transport_api_version), - service_method_( - Grpc::VersionedMethods("envoy.service.load_stats.v3.LoadReportingService.StreamLoadStats", - "envoy.service.load_stats.v2.LoadReportingService.StreamLoadStats") - .getMethodDescriptorForVersion(transport_api_version)), + async_client_(std::move(async_client)), + service_method_(*Protobuf::DescriptorPool::generated_pool()->FindMethodByName( + "envoy.service.load_stats.v3.LoadReportingService.StreamLoadStats")), time_source_(dispatcher.timeSource()) { request_.mutable_node()->MergeFrom(local_info.node()); 
request_.mutable_node()->add_client_features("envoy.lrs.supports_send_all_clusters"); @@ -111,7 +107,6 @@ void LoadStatsReporter::sendLoadStatsRequest() { clusters_[cluster_name] = now; } - Config::VersionConverter::prepareMessageForGrpcWire(request_, transport_api_version_); ENVOY_LOG(trace, "Sending LoadStatsRequest: {}", request_.DebugString()); stream_->sendMessage(request_, false); stats_.responses_.inc(); diff --git a/source/common/upstream/load_stats_reporter.h b/source/common/upstream/load_stats_reporter.h index 97302ad1223f2..4ab1c9e4a8a27 100644 --- a/source/common/upstream/load_stats_reporter.h +++ b/source/common/upstream/load_stats_reporter.h @@ -34,7 +34,6 @@ class LoadStatsReporter public: LoadStatsReporter(const LocalInfo::LocalInfo& local_info, ClusterManager& cluster_manager, Stats::Scope& scope, Grpc::RawAsyncClientPtr async_client, - envoy::config::core::v3::ApiVersion transport_api_version, Event::Dispatcher& dispatcher); // Grpc::AsyncStreamCallbacks @@ -60,7 +59,6 @@ class LoadStatsReporter Grpc::AsyncClient async_client_; - const envoy::config::core::v3::ApiVersion transport_api_version_; Grpc::AsyncStream stream_{}; const Protobuf::MethodDescriptor& service_method_; Event::TimerPtr retry_timer_; diff --git a/source/common/upstream/logical_host.cc b/source/common/upstream/logical_host.cc index 847fcf83a9bc8..ef8620d4e6395 100644 --- a/source/common/upstream/logical_host.cc +++ b/source/common/upstream/logical_host.cc @@ -7,10 +7,13 @@ Upstream::Host::CreateConnectionData LogicalHost::createConnection( Event::Dispatcher& dispatcher, const Network::ConnectionSocket::OptionsSharedPtr& options, Network::TransportSocketOptionsConstSharedPtr transport_socket_options) const { const auto current_address = address(); - return {HostImpl::createConnection( - dispatcher, cluster(), current_address, transportSocketFactory(), options, - override_transport_socket_options_ != nullptr ? 
override_transport_socket_options_ - : transport_socket_options), + const std::vector& address_list = addressList(); + + return {HostImpl::createConnection(dispatcher, cluster(), current_address, address_list, + transportSocketFactory(), options, + override_transport_socket_options_ != nullptr + ? override_transport_socket_options_ + : transport_socket_options), std::make_shared(current_address, shared_from_this())}; } diff --git a/source/common/upstream/original_dst_cluster.cc b/source/common/upstream/original_dst_cluster.cc index 25e8ba1908873..9cf6887aff530 100644 --- a/source/common/upstream/original_dst_cluster.cc +++ b/source/common/upstream/original_dst_cluster.cc @@ -31,8 +31,8 @@ HostConstSharedPtr OriginalDstCluster::LoadBalancer::chooseHost(LoadBalancerCont const Network::Connection* connection = context->downstreamConnection(); // The local address of the downstream connection is the original destination address, // if localAddressRestored() returns 'true'. - if (connection && connection->addressProvider().localAddressRestored()) { - dst_host = connection->addressProvider().localAddress(); + if (connection && connection->connectionInfoProvider().localAddressRestored()) { + dst_host = connection->connectionInfoProvider().localAddress(); } } diff --git a/source/common/upstream/strict_dns_cluster.cc b/source/common/upstream/strict_dns_cluster.cc index d46f3f96edfa9..0d03ea0a00f63 100644 --- a/source/common/upstream/strict_dns_cluster.cc +++ b/source/common/upstream/strict_dns_cluster.cc @@ -117,7 +117,6 @@ void StrictDnsClusterImpl::ResolveTarget::startResolve() { if (status == Network::DnsResolver::ResolutionStatus::Success) { parent_.info_->stats().update_success_.inc(); - HostMap updated_hosts; HostVector new_hosts; std::chrono::seconds ttl_refresh_rate = std::chrono::seconds::max(); absl::flat_hash_set all_new_hosts; @@ -127,33 +126,44 @@ void StrictDnsClusterImpl::ResolveTarget::startResolve() { // potentially move port handling into the DNS interface 
itself, which would work better // for SRV. ASSERT(resp.address_ != nullptr); + auto address = Network::Utility::getAddressWithPort(*(resp.address_), port_); + if (all_new_hosts.count(address->asString()) > 0) { + continue; + } + new_hosts.emplace_back(new HostImpl( - parent_.info_, hostname_, - Network::Utility::getAddressWithPort(*(resp.address_), port_), + parent_.info_, hostname_, address, // TODO(zyfjeff): Created through metadata shared pool std::make_shared(lb_endpoint_.metadata()), lb_endpoint_.load_balancing_weight().value(), locality_lb_endpoints_.locality(), lb_endpoint_.endpoint().health_check_config(), locality_lb_endpoints_.priority(), lb_endpoint_.health_status(), parent_.time_source_)); - all_new_hosts.emplace(new_hosts.back()->address()->asString()); + all_new_hosts.emplace(address->asString()); ttl_refresh_rate = min(ttl_refresh_rate, resp.ttl_); } HostVector hosts_added; HostVector hosts_removed; if (parent_.updateDynamicHostList(new_hosts, hosts_, hosts_added, hosts_removed, - updated_hosts, all_hosts_, all_new_hosts)) { + all_hosts_, all_new_hosts)) { ENVOY_LOG(debug, "DNS hosts have changed for {}", dns_address_); ASSERT(std::all_of(hosts_.begin(), hosts_.end(), [&](const auto& host) { return host->priority() == locality_lb_endpoints_.priority(); })); + + // Update host map for current resolve target. + for (const auto& host : hosts_removed) { + all_hosts_.erase(host->address()->asString()); + } + for (const auto& host : hosts_added) { + all_hosts_.insert({host->address()->asString(), host}); + } + parent_.updateAllHosts(hosts_added, hosts_removed, locality_lb_endpoints_.priority()); } else { parent_.info_->stats().update_no_rebuild_.inc(); } - all_hosts_ = std::move(updated_hosts); - // reset failure backoff strategy because there was a success. 
parent_.failure_backoff_strategy_->reset(); diff --git a/source/common/upstream/strict_dns_cluster.h b/source/common/upstream/strict_dns_cluster.h index 6be3a83f436ec..70c3040414524 100644 --- a/source/common/upstream/strict_dns_cluster.h +++ b/source/common/upstream/strict_dns_cluster.h @@ -41,6 +41,13 @@ class StrictDnsClusterImpl : public BaseDynamicClusterImpl { const uint32_t port_; const Event::TimerPtr resolve_timer_; HostVector hosts_; + + // Host map for current resolve target. When we have multiple resolve targets, multiple targets + // may contain two different hosts with the same address. This has two effects: + // 1) This host map cannot be replaced by the cross-priority global host map in the priority + // set. + // 2) Cross-priority global host map may not be able to search for the expected host based on + // the address. HostMap all_hosts_; }; diff --git a/source/common/upstream/subset_lb.cc b/source/common/upstream/subset_lb.cc index 95cbf16af8cb4..2a080c32ee60a 100644 --- a/source/common/upstream/subset_lb.cc +++ b/source/common/upstream/subset_lb.cc @@ -781,8 +781,8 @@ SubsetLoadBalancer::PrioritySubsetImpl::PrioritySubsetImpl(const SubsetLoadBalan case LoadBalancerType::OriginalDst: case LoadBalancerType::ClusterProvided: - // LoadBalancerType::OriginalDst is blocked in the factory. LoadBalancerType::ClusterProvided - // is impossible because the subset LB returns a null load balancer from its factory. + case LoadBalancerType::LoadBalancingPolicyConfig: + // These load balancer types can only be created when there is no subset configuration. 
NOT_REACHED_GCOVR_EXCL_LINE; } diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index 63c68663ee0f3..5897aab3e2f1e 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -36,6 +36,7 @@ #include "source/common/http/http2/codec_stats.h" #include "source/common/http/utility.h" #include "source/common/network/address_impl.h" +#include "source/common/network/happy_eyeballs_connection_impl.h" #include "source/common/network/resolver_impl.h" #include "source/common/network/socket_option_factory.h" #include "source/common/network/socket_option_impl.h" @@ -123,7 +124,6 @@ parseClusterSocketOptions(const envoy::config::cluster::v3::Cluster& config, ProtocolOptionsConfigConstSharedPtr createProtocolOptionsConfig(const std::string& name, const ProtobufWkt::Any& typed_config, - const ProtobufWkt::Struct& config, Server::Configuration::ProtocolOptionsFactoryContext& factory_context) { Server::Configuration::ProtocolOptionsFactory* factory = Registry::FactoryRegistry::getFactory( @@ -151,7 +151,7 @@ createProtocolOptionsConfig(const std::string& name, const ProtobufWkt::Any& typ } Envoy::Config::Utility::translateOpaqueConfig( - typed_config, config, factory_context.messageValidationVisitor(), *proto_config); + typed_config, factory_context.messageValidationVisitor(), *proto_config); return factory->createProtocolOptionsConfig(*proto_config, factory_context); } @@ -166,8 +166,7 @@ absl::flat_hash_map parseExten // protocol options. 
auto& name = Extensions::NetworkFilters::Common::FilterNameUtil::canonicalFilterName(it.first); - auto object = createProtocolOptionsConfig( - name, it.second, ProtobufWkt::Struct::default_instance(), factory_context); + auto object = createProtocolOptionsConfig(name, it.second, factory_context); if (object != nullptr) { options[name] = std::move(object); } @@ -262,8 +261,8 @@ Network::TransportSocketFactory& HostDescriptionImpl::resolveTransportSocketFact Host::CreateConnectionData HostImpl::createConnection( Event::Dispatcher& dispatcher, const Network::ConnectionSocket::OptionsSharedPtr& options, Network::TransportSocketOptionsConstSharedPtr transport_socket_options) const { - return {createConnection(dispatcher, cluster(), address(), transportSocketFactory(), options, - transport_socket_options), + return {createConnection(dispatcher, cluster(), address(), addressList(), + transportSocketFactory(), options, transport_socket_options), shared_from_this()}; } @@ -293,17 +292,18 @@ Host::CreateConnectionData HostImpl::createHealthCheckConnection( Network::TransportSocketFactory& factory = (metadata != nullptr) ? 
resolveTransportSocketFactory(healthCheckAddress(), metadata) : transportSocketFactory(); - return {createConnection(dispatcher, cluster(), healthCheckAddress(), factory, nullptr, + return {createConnection(dispatcher, cluster(), healthCheckAddress(), {}, factory, nullptr, transport_socket_options), shared_from_this()}; } -Network::ClientConnectionPtr -HostImpl::createConnection(Event::Dispatcher& dispatcher, const ClusterInfo& cluster, - const Network::Address::InstanceConstSharedPtr& address, - Network::TransportSocketFactory& socket_factory, - const Network::ConnectionSocket::OptionsSharedPtr& options, - Network::TransportSocketOptionsConstSharedPtr transport_socket_options) { +Network::ClientConnectionPtr HostImpl::createConnection( + Event::Dispatcher& dispatcher, const ClusterInfo& cluster, + const Network::Address::InstanceConstSharedPtr& address, + const std::vector& address_list, + Network::TransportSocketFactory& socket_factory, + const Network::ConnectionSocket::OptionsSharedPtr& options, + Network::TransportSocketOptionsConstSharedPtr transport_socket_options) { Network::ConnectionSocket::OptionsSharedPtr connection_options; if (cluster.clusterSocketOptions() != nullptr) { if (options) { @@ -318,10 +318,16 @@ HostImpl::createConnection(Event::Dispatcher& dispatcher, const ClusterInfo& clu connection_options = options; } ASSERT(!address->envoyInternalAddress()); - Network::ClientConnectionPtr connection = dispatcher.createClientConnection( - address, cluster.sourceAddress(), - socket_factory.createTransportSocket(std::move(transport_socket_options)), - connection_options); + Network::ClientConnectionPtr connection = + address_list.size() > 1 + ? 
std::make_unique( + dispatcher, address_list, cluster.sourceAddress(), socket_factory, + transport_socket_options, connection_options) + : dispatcher.createClientConnection( + address, cluster.sourceAddress(), + socket_factory.createTransportSocket(std::move(transport_socket_options)), + connection_options); + connection->setBufferLimits(cluster.perConnectionBufferLimitBytes()); cluster.createNetworkFilterChain(*connection); return connection; @@ -543,13 +549,18 @@ PrioritySetImpl::getOrCreateHostSet(uint32_t priority, void PrioritySetImpl::updateHosts(uint32_t priority, UpdateHostsParams&& update_hosts_params, LocalityWeightsConstSharedPtr locality_weights, const HostVector& hosts_added, const HostVector& hosts_removed, - absl::optional overprovisioning_factor) { + absl::optional overprovisioning_factor, + HostMapConstSharedPtr cross_priority_host_map) { // Ensure that we have a HostSet for the given priority. getOrCreateHostSet(priority, overprovisioning_factor); static_cast(host_sets_[priority].get()) ->updateHosts(std::move(update_hosts_params), std::move(locality_weights), hosts_added, hosts_removed, overprovisioning_factor); + if (cross_priority_host_map != nullptr) { + const_cross_priority_host_map_ = std::move(cross_priority_host_map); + } + if (!batch_update_) { runUpdateCallbacks(hosts_added, hosts_removed); } @@ -588,6 +599,52 @@ void PrioritySetImpl::BatchUpdateScope::updateHosts( hosts_removed, overprovisioning_factor); } +void MainPrioritySetImpl::updateHosts(uint32_t priority, UpdateHostsParams&& update_hosts_params, + LocalityWeightsConstSharedPtr locality_weights, + const HostVector& hosts_added, + const HostVector& hosts_removed, + absl::optional overprovisioning_factor, + HostMapConstSharedPtr cross_priority_host_map) { + ASSERT(cross_priority_host_map == nullptr, + "External cross-priority host map is meaningless to MainPrioritySetImpl"); + updateCrossPriorityHostMap(hosts_added, hosts_removed); + + PrioritySetImpl::updateHosts(priority, 
std::move(update_hosts_params), locality_weights, + hosts_added, hosts_removed, overprovisioning_factor); +} + +HostMapConstSharedPtr MainPrioritySetImpl::crossPriorityHostMap() const { + // Check if the host set in the main thread PrioritySet has been updated. + if (mutable_cross_priority_host_map_ != nullptr) { + const_cross_priority_host_map_ = std::move(mutable_cross_priority_host_map_); + ASSERT(mutable_cross_priority_host_map_ == nullptr); + } + return const_cross_priority_host_map_; +} + +void MainPrioritySetImpl::updateCrossPriorityHostMap(const HostVector& hosts_added, + const HostVector& hosts_removed) { + if (hosts_added.empty() && hosts_removed.empty()) { + // No new hosts have been added and no old hosts have been removed. + return; + } + + // Since read_only_all_host_map_ may be shared by multiple threads, when the host set changes, we + // cannot directly modify read_only_all_host_map_. + if (mutable_cross_priority_host_map_ == nullptr) { + // Copy old read only host map to mutable host map. 
+ mutable_cross_priority_host_map_ = std::make_shared(*const_cross_priority_host_map_); + } + + for (const auto& host : hosts_removed) { + mutable_cross_priority_host_map_->erase(host->address()->asString()); + } + + for (const auto& host : hosts_added) { + mutable_cross_priority_host_map_->insert({host->address()->asString(), host}); + } +} + ClusterStats ClusterInfoImpl::generateStats(Stats::Scope& scope, const ClusterStatNames& stat_names) { return ClusterStats(stat_names, scope); @@ -799,6 +856,45 @@ ClusterInfoImpl::ClusterInfoImpl( lb_type_ = LoadBalancerType::ClusterProvided; break; + case envoy::config::cluster::v3::Cluster::LOAD_BALANCING_POLICY_CONFIG: { + if (config.has_lb_subset_config()) { + throw EnvoyException( + fmt::format("cluster: LB policy {} cannot be combined with lb_subset_config", + envoy::config::cluster::v3::Cluster::LbPolicy_Name(config.lb_policy()))); + } + + if (config.has_common_lb_config()) { + throw EnvoyException( + fmt::format("cluster: LB policy {} cannot be combined with common_lb_config", + envoy::config::cluster::v3::Cluster::LbPolicy_Name(config.lb_policy()))); + } + + if (!config.has_load_balancing_policy()) { + throw EnvoyException( + fmt::format("cluster: LB policy {} requires load_balancing_policy to be set", + envoy::config::cluster::v3::Cluster::LbPolicy_Name(config.lb_policy()))); + } + + for (const auto& policy : config.load_balancing_policy().policies()) { + TypedLoadBalancerFactory* factory = + Config::Utility::getAndCheckFactory( + policy.typed_extension_config(), /*is_optional=*/true); + if (factory != nullptr) { + load_balancing_policy_ = policy; + load_balancer_factory_ = factory; + break; + } + } + + if (load_balancer_factory_ == nullptr) { + throw EnvoyException(fmt::format( + "Didn't find a registered load balancer factory implementation for cluster: '{}'", + name_)); + } + + lb_type_ = LoadBalancerType::LoadBalancingPolicyConfig; + break; + } default: NOT_REACHED_GCOVR_EXCL_LINE; } @@ -841,7 +937,7 @@ 
ClusterInfoImpl::ClusterInfoImpl( auto& factory = Config::Utility::getAndCheckFactory< Server::Configuration::NamedUpstreamNetworkFilterConfigFactory>(proto_config); auto message = factory.createEmptyConfigProto(); - Config::Utility::translateOpaqueConfig(proto_config.typed_config(), ProtobufWkt::Struct(), + Config::Utility::translateOpaqueConfig(proto_config.typed_config(), factory_context.messageValidationVisitor(), *message); Network::FilterFactoryCb callback = factory.createFilterFactoryFromProto(*message, *factory_context_); @@ -886,6 +982,10 @@ std::vector ClusterInfoImpl::upstreamHttpProtocol(absl::optional downstream_protocol) const { if (downstream_protocol.has_value() && features_ & Upstream::ClusterInfo::Features::USE_DOWNSTREAM_PROTOCOL) { + if (downstream_protocol.value() == Http::Protocol::Http3 && + !(features_ & Upstream::ClusterInfo::Features::HTTP3)) { + return {Http::Protocol::Http2}; + } return {downstream_protocol.value()}; } @@ -1113,6 +1213,11 @@ void ClusterImplBase::setOutlierDetector(const Outlier::DetectorSharedPtr& outli [this](const HostSharedPtr& host) -> void { reloadHealthyHosts(host); }); } +void ClusterImplBase::setTransportFactoryContext( + Server::Configuration::TransportSocketFactoryContextPtr transport_factory_context) { + transport_factory_context_ = std::move(transport_factory_context); +} + void ClusterImplBase::reloadHealthyHosts(const HostSharedPtr& host) { // Every time a host changes Health Check state we cause a full healthy host recalculation which // for expensive LBs (ring, subset, etc.) can be quite time consuming. During startup, this @@ -1366,8 +1471,16 @@ void PriorityStateManager::updateClusterPrioritySet( LocalityWeightsSharedPtr locality_weights; std::vector per_locality; - // If we are configured for locality weighted LB we populate the locality weights. 
- const bool locality_weighted_lb = parent_.info()->lbConfig().has_locality_weighted_lb_config(); + // If we are configured for locality weighted LB we populate the locality weights. We also + // populate locality weights if the cluster uses load balancing extensions, since the extension + // may want to make use of locality weights and we cannot tell by inspecting the config whether + // this is the case. + // + // TODO: have the load balancing extension indicate, programmatically, whether it needs locality + // weights, as an optimization in cases where it doesn't. + const bool locality_weighted_lb = + parent_.info()->lbConfig().has_locality_weighted_lb_config() || + parent_.info()->lbType() == LoadBalancerType::LoadBalancingPolicyConfig; if (locality_weighted_lb) { locality_weights = std::make_shared(); } @@ -1430,8 +1543,7 @@ void PriorityStateManager::updateClusterPrioritySet( bool BaseDynamicClusterImpl::updateDynamicHostList( const HostVector& new_hosts, HostVector& current_priority_hosts, HostVector& hosts_added_to_current_priority, HostVector& hosts_removed_from_current_priority, - HostMap& updated_hosts, const HostMap& all_hosts, - const absl::flat_hash_set& all_new_hosts) { + const HostMap& all_hosts, const absl::flat_hash_set& all_new_hosts) { uint64_t max_host_weight = 1; // Did hosts change? @@ -1460,10 +1572,6 @@ bool BaseDynamicClusterImpl::updateDynamicHostList( absl::flat_hash_set new_hosts_for_current_priority(new_hosts.size()); HostVector final_hosts; for (const HostSharedPtr& host : new_hosts) { - if (updated_hosts.count(host->address()->asString())) { - continue; - } - // To match a new host with an existing host means comparing their addresses. 
auto existing_host = all_hosts.find(host->address()->asString()); const bool existing_host_found = existing_host != all_hosts.end(); @@ -1539,7 +1647,6 @@ bool BaseDynamicClusterImpl::updateDynamicHostList( } final_hosts.push_back(existing_host->second); - updated_hosts[existing_host->second->address()->asString()] = existing_host->second; } else { new_hosts_for_current_priority.emplace(host->address()->asString()); if (host->weight() > max_host_weight) { @@ -1557,7 +1664,6 @@ bool BaseDynamicClusterImpl::updateDynamicHostList( } } - updated_hosts[host->address()->asString()] = host; final_hosts.push_back(host); hosts_added_to_current_priority.push_back(host); } @@ -1598,8 +1704,8 @@ bool BaseDynamicClusterImpl::updateDynamicHostList( if (!current_priority_hosts.empty() && dont_remove_healthy_hosts) { erase_from = std::remove_if(current_priority_hosts.begin(), current_priority_hosts.end(), - [&all_new_hosts, &new_hosts_for_current_priority, &updated_hosts, - &final_hosts, &max_host_weight](const HostSharedPtr& p) { + [&all_new_hosts, &new_hosts_for_current_priority, &final_hosts, + &max_host_weight](const HostSharedPtr& p) { if (all_new_hosts.contains(p->address()->asString()) && !new_hosts_for_current_priority.contains(p->address()->asString())) { // If the address is being completely deleted from this priority, but is @@ -1617,7 +1723,6 @@ bool BaseDynamicClusterImpl::updateDynamicHostList( } final_hosts.push_back(p); - updated_hosts[p->address()->asString()] = p; p->healthFlagSet(Host::HealthFlag::PENDING_DYNAMIC_REMOVAL); return true; } diff --git a/source/common/upstream/upstream_impl.h b/source/common/upstream/upstream_impl.h index ec2eb3e147392..91b9c2133a887 100644 --- a/source/common/upstream/upstream_impl.h +++ b/source/common/upstream/upstream_impl.h @@ -153,6 +153,10 @@ class HostDescriptionImpl : virtual public HostDescription, const envoy::config::core::v3::Metadata* metadata) const; MonotonicTime creationTime() const override { return creation_time_; 
} + void setAddressList(const std::vector& address_list) { + address_list_ = address_list; + } + protected: void setAddress(Network::Address::InstanceConstSharedPtr address) { address_ = address; } @@ -265,6 +269,7 @@ class HostImpl : public HostDescriptionImpl, static Network::ClientConnectionPtr createConnection(Event::Dispatcher& dispatcher, const ClusterInfo& cluster, const Network::Address::InstanceConstSharedPtr& address, + const std::vector& address_list, Network::TransportSocketFactory& socket_factory, const Network::ConnectionSocket::OptionsSharedPtr& options, Network::TransportSocketOptionsConstSharedPtr transport_socket_options); @@ -480,10 +485,15 @@ class PrioritySetImpl : public PrioritySet { void updateHosts(uint32_t priority, UpdateHostsParams&& update_hosts_params, LocalityWeightsConstSharedPtr locality_weights, const HostVector& hosts_added, const HostVector& hosts_removed, - absl::optional overprovisioning_factor = absl::nullopt) override; + absl::optional overprovisioning_factor = absl::nullopt, + HostMapConstSharedPtr cross_priority_host_map = nullptr) override; void batchHostUpdate(BatchUpdateCb& callback) override; + HostMapConstSharedPtr crossPriorityHostMap() const override { + return const_cross_priority_host_map_; + } + protected: // Allows subclasses of PrioritySetImpl to create their own type of HostSetImpl. virtual HostSetImplPtr createHostSet(uint32_t priority, @@ -504,6 +514,9 @@ class PrioritySetImpl : public PrioritySet { // avoid any potential lifetime issues. std::vector> host_sets_; + // Read only all host map for fast host searching. This will never be null. + mutable HostMapConstSharedPtr const_cross_priority_host_map_{std::make_shared()}; + private: // This is a matching vector to store the callback handles for host_sets_. It is kept separately // because host_sets_ is directly returned so we avoid translation. 
@@ -539,6 +552,26 @@ class PrioritySetImpl : public PrioritySet { }; }; +/** + * Specialized PrioritySetImpl designed for the main thread. It will update and maintain the read + * only cross priority host map when the host set changes. + */ +class MainPrioritySetImpl : public PrioritySetImpl, public Logger::Loggable { +public: + // PrioritySet + void updateHosts(uint32_t priority, UpdateHostsParams&& update_hosts_params, + LocalityWeightsConstSharedPtr locality_weights, const HostVector& hosts_added, + const HostVector& hosts_removed, + absl::optional overprovisioning_factor = absl::nullopt, + HostMapConstSharedPtr cross_priority_host_map = nullptr) override; + HostMapConstSharedPtr crossPriorityHostMap() const override; + +protected: + void updateCrossPriorityHostMap(const HostVector& hosts_added, const HostVector& hosts_removed); + + mutable HostMapSharedPtr mutable_cross_priority_host_map_; +}; + /** * Implementation of ClusterInfo that reads from JSON. */ @@ -568,6 +601,11 @@ class ClusterInfoImpl : public ClusterInfo, // Upstream::ClusterInfo bool addedViaApi() const override { return added_via_api_; } + const envoy::config::cluster::v3::LoadBalancingPolicy_Policy& + loadBalancingPolicy() const override { + return load_balancing_policy_; + } + TypedLoadBalancerFactory* loadBalancerFactory() const override { return load_balancer_factory_; } const envoy::config::cluster::v3::Cluster::CommonLbConfig& lbConfig() const override { return common_lb_config_; } @@ -751,6 +789,8 @@ class ClusterInfoImpl : public ClusterInfo, LoadBalancerSubsetInfoImpl lb_subset_; const envoy::config::core::v3::Metadata metadata_; Envoy::Config::TypedMetadataImpl typed_metadata_; + envoy::config::cluster::v3::LoadBalancingPolicy_Policy load_balancing_policy_; + TypedLoadBalancerFactory* load_balancer_factory_ = nullptr; const envoy::config::cluster::v3::Cluster::CommonLbConfig common_lb_config_; const Network::ConnectionSocket::OptionsSharedPtr cluster_socket_options_; const bool 
drain_connections_on_host_removal_; @@ -799,6 +839,13 @@ class ClusterImplBase : public Cluster, protected Logger::Loggable; @@ -958,8 +1006,6 @@ class BaseDynamicClusterImpl : public ClusterImplBase { * @param hosts_added_to_current_priority will be populated with hosts added to the priority. * @param hosts_removed_from_current_priority will be populated with hosts removed from the * priority. - * @param updated_hosts is used to aggregate the new state of all hosts across priority, and will - * be updated with the hosts that remain in this priority after the update. * @param all_hosts all known hosts prior to this host update across all priorities. * @param all_new_hosts addresses of all hosts in the new configuration across all priorities. * @return whether the hosts for the priority changed. @@ -967,7 +1013,7 @@ class BaseDynamicClusterImpl : public ClusterImplBase { bool updateDynamicHostList(const HostVector& new_hosts, HostVector& current_priority_hosts, HostVector& hosts_added_to_current_priority, HostVector& hosts_removed_from_current_priority, - HostMap& updated_hosts, const HostMap& all_hosts, + const HostMap& all_hosts, const absl::flat_hash_set& all_new_hosts); }; diff --git a/source/common/upstream/wrsq_scheduler.h b/source/common/upstream/wrsq_scheduler.h new file mode 100644 index 0000000000000..ca9e3f88cf693 --- /dev/null +++ b/source/common/upstream/wrsq_scheduler.h @@ -0,0 +1,191 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "envoy/common/random_generator.h" +#include "envoy/upstream/scheduler.h" + +#include "source/common/common/assert.h" +#include "source/common/common/logger.h" + +#include "absl/container/flat_hash_map.h" + +namespace Envoy { +namespace Upstream { + +// Weighted Random Selection Queue (WRSQ) Scheduler +// ------------------------------------------------ +// This scheduler keeps a queue for each unique weight among all objects inserted and adds the +// objects to their respective 
queue based on weight. When performing a pick operation, a queue is +// selected and an object is pulled. Each queue gets its own selection probability which is weighted +// as the sum of all weights of objects contained within. Once a queue is picked, you can simply +// pull from the top and honor the expected selection probability of each object. +// +// Adding an object will cause the scheduler to rebuild internal structures on the first pick that +// follows. This first pick operation will be linear on the number of unique weights among objects +// inserted. Subsequent picks will be logarithmic with the number of unique weights. Adding objects +// is always constant time. +// +// For the case where all object weights are the same, WRSQ behaves identical to vanilla +// round-robin. If all object weights are different, it behaves identical to weighted random +// selection. +// +// NOTE: While the base scheduler interface allows for mutation of object weights with each pick, +// this implementation is not meant for circumstances where the object weights change with each pick +// (like in the least request LB). This scheduler implementation will perform quite poorly if the +// object weights change often. +template +class WRSQScheduler : public Scheduler, protected Logger::Loggable { +public: + WRSQScheduler(Random::RandomGenerator& random) : random_(random) {} + + std::shared_ptr peekAgain(std::function calculate_weight) override { + std::shared_ptr picked{pickAndAddInternal(calculate_weight)}; + if (picked != nullptr) { + prepick_queue_.emplace(picked); + } + return picked; + } + + std::shared_ptr pickAndAdd(std::function calculate_weight) override { + // Burn through the pre-pick queue. 
+ while (!prepick_queue_.empty()) { + std::shared_ptr prepicked_obj = prepick_queue_.front().lock(); + prepick_queue_.pop(); + if (prepicked_obj != nullptr) { + return prepicked_obj; + } + } + + return pickAndAddInternal(calculate_weight); + } + + void add(double weight, std::shared_ptr entry) override { + rebuild_cumulative_weights_ = true; + queue_map_[weight].emplace(std::move(entry)); + } + + bool empty() const override { return queue_map_.empty(); } + +private: + using ObjQueue = std::queue>; + + // TODO(tonya11en): We can reduce memory utilization by using an absl::flat_hash_map of QueueInfo + // with heterogeneous lookup on the weight. This would allow us to save 8 bytes per unique weight. + using QueueMap = absl::flat_hash_map; + + // Used to store a queue's weight info necessary to perform the weighted random selection. + struct QueueInfo { + double cumulative_weight; + double weight; + ObjQueue& q; + }; + + // If needed, such as after object expiry or addition, rebuild the cumulative weights vector. + void maybeRebuildCumulativeWeights() { + if (!rebuild_cumulative_weights_) { + return; + } + + cumulative_weights_.clear(); + cumulative_weights_.reserve(queue_map_.size()); + + double weight_sum = 0; + for (auto& it : queue_map_) { + const auto weight_val = it.first; + weight_sum += weight_val * it.second.size(); + cumulative_weights_.push_back({weight_sum, weight_val, it.second}); + } + + rebuild_cumulative_weights_ = false; + } + + // Performs a weighted random selection on the queues containing objects of the same weight. + // Popping off the top of the queue to pick an object will honor the selection probability based + // on the weight provided when the object was added. 
+ QueueInfo& chooseQueue() { + ASSERT(!queue_map_.empty()); + + maybeRebuildCumulativeWeights(); + + const double weight_sum = cumulative_weights_.back().cumulative_weight; + uint64_t rnum = random_.random() % static_cast(weight_sum); + auto it = std::upper_bound(cumulative_weights_.begin(), cumulative_weights_.end(), rnum, + [](auto a, auto b) { return a < b.cumulative_weight; }); + ASSERT(it != cumulative_weights_.end()); + return *it; + } + + // Remove objects from the queue until it's empty or there is an unexpired object at the front. If + // the queue is purged to empty, it's removed from the queue map and we return true. + bool purgeExpired(QueueInfo& qinfo) { + while (!qinfo.q.empty() && qinfo.q.front().expired()) { + qinfo.q.pop(); + rebuild_cumulative_weights_ = true; + } + + if (qinfo.q.empty()) { + queue_map_.erase(qinfo.weight); + return true; + } + return false; + } + + std::shared_ptr pickAndAddInternal(std::function calculate_weight) { + while (!queue_map_.empty()) { + QueueInfo& qinfo = chooseQueue(); + if (purgeExpired(qinfo)) { + // The chosen queue was purged to empty and removed from the queue map. Try again. + continue; + } + + auto obj = qinfo.q.front().lock(); + qinfo.q.pop(); + if (obj == nullptr) { + // The object expired after the purge. + continue; + } + + const double new_weight = calculate_weight ? calculate_weight(*obj) : qinfo.weight; + if (new_weight == qinfo.weight) { + qinfo.q.emplace(obj); + } else { + // The weight has changed for this object, so we must re-add it to the scheduler. + ENVOY_LOG_EVERY_POW_2( + warn, "WRSQ scheduler is used with a load balancer that mutates host weights with each " + "selection, this will likely result in poor selection performance"); + add(new_weight, obj); + } + + return obj; + } + + return nullptr; + } + + Random::RandomGenerator& random_; + + // Objects already picked via peekAgain(). + ObjQueue prepick_queue_; + + // A mapping from an object weight to the associated queue. 
+ QueueMap queue_map_; + + // Stores the necessary information to perform a weighted random selection of the different + // queues. A cumulative sum is also kept of the total object weights for a queue, which allows for + // a single random number generation and a binary search to pick a queue. + std::vector cumulative_weights_; + + // Keeps state that determines whether the cumulative weights need to be rebuilt. If any objects + // contained in a queue change from addition or expiry, it throws off the cumulative weight + // values. Therefore, they must be recalculated. + bool rebuild_cumulative_weights_{true}; +}; + +} // namespace Upstream +} // namespace Envoy diff --git a/source/common/watchdog/abort_action_config.cc b/source/common/watchdog/abort_action_config.cc index ad775743598a8..5d8be8002d62b 100644 --- a/source/common/watchdog/abort_action_config.cc +++ b/source/common/watchdog/abort_action_config.cc @@ -13,7 +13,7 @@ Server::Configuration::GuardDogActionPtr AbortActionFactory::createGuardDogActio const envoy::config::bootstrap::v3::Watchdog::WatchdogAction& config, Server::Configuration::GuardDogActionFactoryContext& context) { AbortActionConfig message; - Config::Utility::translateOpaqueConfig(config.config().typed_config(), ProtobufWkt::Struct(), + Config::Utility::translateOpaqueConfig(config.config().typed_config(), ProtobufMessage::getStrictValidationVisitor(), message); return std::make_unique(message, context); } diff --git a/source/docs/h2_metadata.md b/source/docs/h2_metadata.md index a45fb1f4f59ba..71d5eba1a32ca 100644 --- a/source/docs/h2_metadata.md +++ b/source/docs/h2_metadata.md @@ -1,12 +1,12 @@ ### Overview -Envoy provides a way for users to communicate extra information associating with a stream that is -not carried in the standard HTTP(s) and HTTP/2 headers and payloads. The -information is represented by a string key-value pair. 
For example, users can -pass along RTT information associated with a stream using a key of "rtt info", and a value of -"100ms". In Envoy, we call this type of information metadata. -A stream can be associated with multiple metadata, and the multiple metadata -are represented by a map. +Envoy provides a way for users to communicate extra information associated with +a stream that is not carried in the standard HTTP(s) and HTTP/2 headers and +payloads. The information is represented by a string key-value pair. For +example, users can pass along RTT information associated with a stream using a +key of "rtt info", and a value of "100ms". In Envoy, we call this type of +information metadata. A stream can be associated with multiple metadata, and +the multiple metadata are represented by a map. Note: the metadata implementation is still in progress, and the doc is in draft version. @@ -14,87 +14,107 @@ version. ### Limitation and conditions For ease of implementation and compatibility purposes, metadata will only be -supported in HTTP/2. Metadata sent in any other protocol should result in protocol -errors or be ignored. +supported in HTTP/2. Metadata sent in any other protocol should result in +protocol errors or be ignored. -To simplify the implementation, we don't allow metadata frames to carry end of -stream flag. Because metadata frames must be associated with an existing stream, users must -ensure metadata frames to be received before the end of stream is received by the -peer. +To simplify the implementation, we don't allow metadata frames to carry the +END\_STREAM flag. Because metadata frames must be associated with an existing +stream, users must ensure that the peer receives metadata frames before a frame +carrying the END\_STREAM flag. -Metadata associated with a stream can be sent before headers, after headers, -between data or after data. 
If metadata frames have to be sent last, -users must put the end of stream in an empty data frame and send the empty data frame after metadata frames. +Metadata associated with a stream can be sent before HEADERS, after HEADERS, +between DATA, or after DATA. If metadata frames have to be sent last, users must +put the END\_STREAM flag in an empty DATA frame and send the empty DATA frame +after metadata frames. Envoy only allows up to 1M metadata to be sent per stream. If the accumulated -metadata size exceeds the limit, the stream will be reset. +metadata size exceeds the limit at the sender, some metadata will be dropped. If +the limit is exceeded at the receiver, the connection will fail. ### Envoy metadata handling -Envoy provides the functionality to proxy, process and add metadata. +Envoy provides the functionality to proxy, process, and add metadata. ## Proxying metadata If not specified, all the metadata received by Envoy is proxied to the next hop -unmodified. Note that, we do not guarantee the same frame order will be preserved from -hop by hop. That is, metadata from upstream at the beginning of a stream can be -received by the downstream at the end of the stream. +unmodified. Note that we do not guarantee the same frame order will be +preserved from hop by hop. That is, metadata from upstream at the beginning of a +stream can be received by the downstream at the end of the stream. ## Consuming metadata If Envoy needs to take actions when a metadata frame is received, users should create a new filter. -If Envoy needs to parse a metadata sent on a request from downstream to upstream, a -StreamDecodeFilter should be created. The interface to override is +If Envoy needs to parse metadata sent on a request from downstream to upstream, +a StreamDecoderFilter should be created. 
The interface to override is: -FilterMetadataStatus StreamDecoderFilter::decodeMetadata(MetadataMap& metadata\_map); +FilterMetadataStatus StreamDecoderFilter::decodeMetadata(MetadataMap& +metadata\_map); -The metadata passed in is a map of the metadata associated with the request stream. After metadata -have been parsed, the filter can choose to remove metadata from the map, or keep -it untouched. +The metadata passed in is a map of the metadata associated with the request +stream. After metadata has been parsed, the filter can choose to add additional +metadata, remove metadata, or keep it untouched by modifying the passed in +`metadata_map` directly. -If Envoy needs to parse a metadata sent on a response from upstream to downstream, a -StreamEncoderFilter should be created. The interface to override is +If Envoy needs to parse metadata sent on a response from upstream to downstream, +a StreamEncoderFilter should be created. The interface to override is: -FilterMetadatasStatus StreamEncoderFilter::encodeMetadata(MetadataMap& metadata); +FilterMetadataStatus StreamEncoderFilter::encodeMetadata(MetadataMap& +metadata); -The metadata passed in is a map of the metadata associated with the response stream. After metadata -have been parsed, the filter can choose to remove metadata from the map, or keep -it untouched. +The metadata passed in is a map of the metadata associated with the response +stream. After metadata has been parsed, the filter can choose to add additional +metadata, remove metadata, or keep it untouched by modifying the passed in +`metadata_map` directly. -Note that, if the metadata in a request or a response is removed from the map after consuming, the metadata -will not be passed to the next hop. An empty map means no metadata will be sent to the next hop. -If the metadata is left in the map, it will be passed to the next hop. 
+Note that if the metadata in a request or a response is removed from the map +after consuming, the metadata will not be passed to the next hop. An empty map +means no metadata will be sent to the next hop. If the metadata is left in the +map, it will be passed to the next hop. ## Inserting metadata Envoy filters can be used to add new metadata to a stream. If users need to add new metadata for a request from downstream to upstream, a -StreamDecoderFilter should be created. The StreamDecoderFilterCallbacks object that Envoy passes to the -StreamDecoderFilter has an interface MetadataMapVector& +StreamDecoderFilter should be created. The StreamDecoderFilterCallbacks object +that Envoy passes to the StreamDecoderFilter has an interface MetadataMapVector& StreamDecoderFilterCallbacks::addDecodedMetadata(). By calling the interface, -users get a reference to a vector of metadata map associated with the request stream. Users can -insert new metadata map to the metadata map vector, and Envoy will proxy the new metadata -map to the upstream. StreamDecoderFilterCallbacks::addDecodedMetadata() can be called in -StreamDecoderFilter::decodeHeaders(), StreamDecoderFilter::decodeData() and -StreamDecoderFilter::decodeTrailers(). Do not call -StreamDecoderFilterCallbacks::addDecodedMetadata() in -StreamDecoderFilter::decodeMetadata(MetadataMap& metadata\_map). New metadata can -be added directly to metadata\_map. - -If users need to add new metadata for a response to downstream, a -StreamEncoderFilter should be created. Users pass the metadata to be added to +users get a reference to a vector of metadata maps associated with the request +stream. Users can insert new metadata maps to the metadata map vector, and Envoy +will proxy the new metadata map to the upstream. 
+StreamDecoderFilterCallbacks::addDecodedMetadata() can be called in the +StreamDecoderFilter::decode*() methods except for decodeMetadata(): +StreamDecoderFilter::decodeHeaders(), StreamDecoderFilter::decodeData(), +StreamDecoderFilter::decodeTrailers(), and +StreamDecoderFilter::decodeComplete(). + +Do not call StreamDecoderFilterCallbacks::addDecodedMetadata() in +StreamDecoderFilter::decodeMetadata(MetadataMap& metadata\_map). Instead, new +metadata can be added directly to `metadata\_map`. + +If users need to add new metadata for a response from upstream to downstream, a +StreamEncoderFilter should be created. The StreamEncoderFilterCallbacks object +that Envoy passes to the StreamEncoderFilter has an interface StreamEncoderFilterCallbacks::addEncodedMetadata(MetadataMapPtr&& -metadata\_map\_ptr). This function can be called in -StreamEncoderFilter::encode100ContinueHeaders(HeaderMap& headers), StreamEncoderFilter::encodeHeaders(HeaderMap& headers, bool end\_stream), -StreamEncoderFilter::encodeData(Buffer::Instance& data, bool end\_stream), StreamEncoderFilter::encodeTrailers(HeaderMap& trailers). -Consequently, the new metadata will be passed through all the encoding filters that follow the filter -where the new metadata are added. - -If users receive metadata from upstream, new metadata can be added directly to -the input argument metadata\_map in StreamFilter::encodeMetadata(MetadataMap& metadata\_map). +metadata\_map\_ptr). By calling the interface, users can directly pass in the +metadata to be added. This function can be called in the +StreamEncoderFilter::encode*() methods except for encodeMetadata(): +StreamEncoderFilter::encode100ContinueHeaders(HeaderMap& headers), +StreamEncoderFilter::encodeHeaders(HeaderMap& headers, bool end\_stream), +StreamEncoderFilter::encodeData(Buffer::Instance& data, bool end\_stream), +StreamEncoderFilter::encodeTrailers(HeaderMap& trailers), and +StreamEncoderFilter::encodeComplete(). 
Consequently, the new metadata will be +passed through all the encoding filters that follow the filter where the new +metadata is added. + +Do not call StreamEncoderFilterCallbacks::addEncodedMetadata() in +StreamEncoderFilter::encodeMetadata(MetadataMap& metadata\_map). Instead, if +users receive metadata from upstream, new metadata can be added directly to the +input argument `metadata\_map` in +StreamEncoderFilter::encodeMetadata(MetadataMap& metadata\_map). ### Metadata implementation @@ -105,10 +125,10 @@ new extension frame type METADATA frame in nghttp2: type = 0x4D -The METADATA frame uses a standard frame header, as described in the -[HTTP/2 spec](https://httpwg.github.io/specs/rfc7540.html#FrameHeader.) -The payload of the METADATA frame is a block of key-value pairs encoded using the [HPACK Literal -Header Field Never Indexed representation]( +The METADATA frame uses a standard frame header, as described in the [HTTP/2 +spec](https://httpwg.github.io/specs/rfc7540.html#FrameHeader.) The payload of +the METADATA frame is a block of key-value pairs encoded using the [HPACK +Literal Header Field Never Indexed representation]( https://httpwg.org/specs/rfc7541.html#literal.header.never.indexed). Each key-value pair represents one piece of metadata. @@ -116,87 +136,96 @@ The METADATA frame defines the following flags: END\_METADATA (0x4). -If the flag is set, it indicates that this frame ends a metadata -payload. +If the flag is set, it indicates that this frame ends a metadata payload. The METADATA frame payload is not subject to HTTP/2 flow control, but the size of the payload is bounded by the maximum frame size negotiated in SETTINGS. -There are no restrictions on the set of octets that may be used in keys or values. +There are no restrictions on the set of octets that may be used in keys or +values. -We do not allow METADATA frame to terminate a stream. DATA, HEADERS or RST\_STREAM must -be used for that purpose. 
+We do not allow a METADATA frame to terminate a stream. DATA, HEADERS or +RST\_STREAM must be used for that purpose. ## Response metadata handling We call metadata that need to be forwarded to downstream the response metadata. Response metadata can be received from upstream or generated locally. -Response metadata is generally a hop by hop message, so Envoy doesn't -need to hold response metadata locally to wait for some events or data. As a result, -filters handling response metadata don't need to stop the filter iteration and wait. Instead response -metadata can be forwarded through targeted filters and sequentially to the -next hop as soon as they are -available, no matter if the metadata are locally generated or received from -upstream. The same statement is also true for metadata from downstream to upstream (request metadata). However, -request metadata may need to wait for the upstream connection to be ready before going to the next hop. -In this section, we focus on response metadata handling. - -We first explain how response metadata get consumed or proxied. -In function EnvoyConnectionManagerImpl::ActiveStream::encodeMetadata(ActiveStreamEncoderFilter\* filter, -MetadataMapPtr&& metadata\_map\_ptr), Envoy passes response metadata received from upstream to filters by -calling the following filter interface: - -FilterMetadatasStatus StreamEncoderFilter::encodeMetadata(MetadataMapVector& metadata\_map). - -Filters, by implementing the interface, can consume response metadata. After going through -the filter chain, function EnvoyConnectionManagerImpl::ActiveStream::encodeMetadata(ActiveStreamEncoderFilter\* filter, -MetadataMapPtr&& metadata\_map\_ptr) immediately forwards the updated or remaining response metadata to the next hop by -calling the metadata encoding function in codec: - -ConnectionManagerImpl::ActiveStream::response\_encoder\_-\>encodeMetadata(MetadataMapVector& metadata\_map\_vector). 
+Response metadata is generally a hop by hop message, so Envoy doesn't need to +hold response metadata locally to wait for some events or data. As a result, +filters handling response metadata don't need to stop the filter iteration and +wait. Instead response metadata can be forwarded through targeted filters and +sequentially to the next hop as soon as they are available, no matter if the +metadata are locally generated or received from upstream. The same statement is +also true for metadata from downstream to upstream (request metadata). However, +request metadata may need to wait for the upstream connection to be ready before +going to the next hop. In this section, we focus on response metadata handling. + +We first explain how response metadata gets consumed or proxied. In function +FilterManager::encodeMetadata(ActiveStreamEncoderFilter\* filter, +MetadataMapPtr&& metadata\_map\_ptr), Envoy passes response metadata received +from upstream to filters by calling the following filter interface: + +FilterMetadatasStatus StreamEncoderFilter::encodeMetadata(MetadataMapVector& +metadata\_map). + +Filters, by implementing the interface, can consume response metadata. After +going through the filter chain, the function +FilterManager::encodeMetadata(ActiveStreamEncoderFilter\* filter, +MetadataMapPtr&& metadata\_map\_ptr) immediately forwards the updated or +remaining response metadata to the next hop by calling a metadata-encoding +function that calls the codec's encoding: + +ConnectionManagerImpl::ActiveStream::encodeMetadata(MetadataMapVector& +metadata). If no filter consumes the response metadata, the response metadata is proxied to the downstream untouched. -Envoy can also add new response metadata through filters's encoding interfaces (See section -[Inserting metadata](#inserting-metadata) for detailed interfaces). 
Filters can add new -metadata by calling StreamDecoderFilterCallbacks::encodeMetadata(MetadataMapPtr&& metadata\_map\_ptr), -which triggers -ConnectionManagerImpl::ActiveStream::encodeMetadata(ActiveStreamEncoderFilter\* filter, MetadataMapPtr&& metadata\_map) -to go through all the encoding filters. -Or new metadata can be added to metadata\_map in -StreamFilter::encodeMetadata(MetadataMap& metadata\_map) directly. +Envoy can also add new response metadata through filters's encoding interfaces +(See section [Inserting metadata](#inserting-metadata) for detailed interfaces). +Filters can add new metadata by calling +StreamEncoderFilterCallbacks::addEncodedMetadata(MetadataMapPtr&& +metadata\_map\_ptr), which triggers +FilterManager::encodeMetadata(ActiveStreamEncoderFilter\* filter, +MetadataMapPtr&& metadata\_map\_ptr) to go through all the encoding filters. Or +new metadata can be added to `metadata\_map` in +StreamEncoderFilter::encodeMetadata(MetadataMap& metadata\_map) directly. ## Request metadata handling -We first explain how request metadata get consumed or proxied. -In function EnvoyConnectionManagerImpl::ActiveStream::decodeMetadata(ActiveStreamDecoderFilter\* filter, -MetadataMap& metadata\_map), Envoy passes request metadata received from downstream to filters by -calling the following filter interface: +We first explain how request metadata gets consumed or proxied. In function +FilterManager::decodeMetadata(ActiveStreamDecoderFilter\* filter, MetadataMap& +metadata\_map), Envoy passes request metadata received from downstream to +filters by calling the following filter interface: -FilterMetadatasStatus StreamDecoderFilter::decodeMetadata(MetadataMap& metadata\_map). +FilterMetadataStatus StreamDecoderFilter::decodeMetadata(MetadataMap& +metadata\_map). -Filters, by implementing the interface, can consume or modify request metadata. If no filter -touches the metadata, it is proxied to upstream unchanged. 
+Filters, by implementing the interface, can consume or modify request metadata. +If no filter touches the metadata, it is proxied to upstream unchanged. The last filter in the filter chain is router filter. The router filter calls -Filter::request\_encoder\_-\>encodeMetadata(const MetadataMapVector& metadata\_map\_vector) to pass -the metadata to codec, and codec encodes and forwards the metadata to the upstream. If the connection -to the upstream has not been established when metadata is received, the metadata is temporarily stored in -Filter::downstream\_metadata\_map\_vector\_. When the connection is ready -(Filter::UpstreamRequest::onPoolReady()), the metadata is then passed to codec, and forwarded to -the upstream. - -Envoy can also add new request metadata through filters's decoding interfaces (See section -[Inserting metadata](#inserting-metadata) for detailed interfaces). Filters can add new -metadata to ActiveStream::request\_metadata\_map\_vector\_ by calling -StreamDecoderFilterCallbacks::addDecodedMetadata(). After calling each filter's decoding function, -Envoy checks if new metadata is added to ActiveStream::request\_metadata\_map\_vector\_. If so, -then Envoy calls ConnectionManagerImpl::ActiveStream::decodeMetadata(ActiveStreamEncoderFilter\* filter, +UpstreamRequest::encodeMetadata(const MetadataMapVector& metadata\_map\_vector) +to pass the metadata to the codec, and the codec encodes and forwards the +metadata to the upstream. If the connection to the upstream has not been +established when metadata is received, the metadata is temporarily stored in +UpstreamRequest::downstream\_metadata\_map\_vector\_. When the connection is +ready (UpstreamRequest::onPoolReady()), the metadata is then passed to the codec +and forwarded to the upstream. + +Envoy can also add new request metadata through filters's decoding interfaces +(See section [Inserting metadata](#inserting-metadata) for detailed interfaces). 
+Filters can add new metadata to FilterManager::request\_metadata\_map\_vector\_ +by calling StreamDecoderFilterCallbacks::addDecodedMetadata(). After calling +each filter's decoding function from StreamDecoderFilter, Envoy checks if new +metadata is added to FilterManager::request\_metadata\_map\_vector\_ via +FilterManager::processNewlyAddedMetadata(). If so, then Envoy calls +FilterManager::decodeMetadata(ActiveStreamEncoderFilter\* filter, MetadataMapPtr&& metadata\_map) to go through all the filters. -Note that, because metadata frames do not carry end\_stream, if new metadata is added to a headers -only request, Envoy moves end\_stream from headers to an empty data frame which is sent after the new -metadata. In addition, Envoy drains metadata in router filter before any other types of -frames except headers to make sure end\_stream is handled correctly. - +Note that because METADATA frames do not carry END\_STREAM, if new metadata is +added to a headers only request, Envoy moves END\_STREAM from HEADERS to an +empty DATA frame that is sent after the new metadata. In addition, Envoy drains +metadata in the router filter before any other types of frames except HEADERS to +make sure END\_STREAM is handled correctly. 
diff --git a/source/docs/network_filter_fuzzing.md b/source/docs/network_filter_fuzzing.md index 8e40210cac24e..f23354423bcd0 100644 --- a/source/docs/network_filter_fuzzing.md +++ b/source/docs/network_filter_fuzzing.md @@ -33,9 +33,8 @@ envoy_cc_fuzz_test( deps = [ ":uber_writefilter_lib", "//source/common/config:utility_lib", - "//source/extensions/filters/network/kafka:kafka_broker_config_lib", "//source/extensions/filters/network/mongo_proxy:config", - "//source/extensions/filters/network/mysql_proxy:config", + "//contrib/mysql_proxy/filters/network/source:config", "//source/extensions/filters/network/zookeeper_proxy:config", "//source/extensions/filters/network/the_new_filter_created_by_you:config", // <---Add the filter config module here "//test/config:utility_lib", diff --git a/source/extensions/access_loggers/common/grpc_access_logger.h b/source/extensions/access_loggers/common/grpc_access_logger.h index 2e008a9c1522f..416d19164662e 100644 --- a/source/extensions/access_loggers/common/grpc_access_logger.h +++ b/source/extensions/access_loggers/common/grpc_access_logger.h @@ -68,22 +68,16 @@ template class GrpcAccessLogge * @param config supplies the configuration for the logger. * @return GrpcAccessLoggerSharedPtr ready for logging requests. 
*/ - virtual typename GrpcAccessLogger::SharedPtr - getOrCreateLogger(const ConfigProto& config, - envoy::config::core::v3::ApiVersion transport_version, - GrpcAccessLoggerType logger_type, Stats::Scope& scope) PURE; + virtual typename GrpcAccessLogger::SharedPtr getOrCreateLogger(const ConfigProto& config, + GrpcAccessLoggerType logger_type, + Stats::Scope& scope) PURE; }; template class GrpcAccessLogClient { public: GrpcAccessLogClient(const Grpc::RawAsyncClientSharedPtr& client, const Protobuf::MethodDescriptor& service_method) - : GrpcAccessLogClient(client, service_method, absl::nullopt) {} - GrpcAccessLogClient(const Grpc::RawAsyncClientSharedPtr& client, - const Protobuf::MethodDescriptor& service_method, - envoy::config::core::v3::ApiVersion transport_api_version) - : client_(client), service_method_(service_method), - transport_api_version_(transport_api_version) {} + : client_(client), service_method_(service_method) {} public: struct LocalStream : public Grpc::AsyncStreamCallbacks { @@ -123,11 +117,7 @@ template class GrpcAccessLogClient { if (stream_->stream_->isAboveWriteBufferHighWatermark()) { return false; } - if (transport_api_version_.has_value()) { - stream_->stream_->sendMessage(request, transport_api_version_.value(), false); - } else { - stream_->stream_->sendMessage(request, false); - } + stream_->stream_->sendMessage(request, false); } else { // Clear out the stream data due to stream creation failure. 
stream_.reset(); @@ -138,7 +128,6 @@ template class GrpcAccessLogClient { Grpc::AsyncClient client_; std::unique_ptr stream_; const Protobuf::MethodDescriptor& service_method_; - const absl::optional transport_api_version_; }; } // namespace Detail @@ -173,16 +162,7 @@ class GrpcAccessLogger : public Detail::GrpcAccessLoggerenableTimer(buffer_flush_interval_msec_); @@ -278,10 +258,9 @@ class GrpcAccessLoggerCache : public Singleton::Instance, }); } - typename GrpcAccessLogger::SharedPtr - getOrCreateLogger(const ConfigProto& config, - envoy::config::core::v3::ApiVersion transport_version, - GrpcAccessLoggerType logger_type, Stats::Scope& scope) override { + typename GrpcAccessLogger::SharedPtr getOrCreateLogger(const ConfigProto& config, + GrpcAccessLoggerType logger_type, + Stats::Scope& scope) override { // TODO(euroelessar): Consider cleaning up loggers. auto& cache = tls_slot_->getTyped(); const auto cache_key = std::make_pair(MessageUtil::hash(config), logger_type); @@ -290,7 +269,7 @@ class GrpcAccessLoggerCache : public Singleton::Instance, return it->second; } const auto logger = createLogger( - config, transport_version, + config, async_client_manager_.factoryForGrpcService(config.grpc_service(), scope_, false) ->createUncachedRawAsyncClient(), std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(config, buffer_flush_interval, 1000)), @@ -316,8 +295,7 @@ class GrpcAccessLoggerCache : public Singleton::Instance, // Create the specific logger type for this cache. 
virtual typename GrpcAccessLogger::SharedPtr - createLogger(const ConfigProto& config, envoy::config::core::v3::ApiVersion transport_version, - const Grpc::RawAsyncClientSharedPtr& client, + createLogger(const ConfigProto& config, const Grpc::RawAsyncClientSharedPtr& client, std::chrono::milliseconds buffer_flush_interval_msec, uint64_t max_buffer_size_bytes, Event::Dispatcher& dispatcher, Stats::Scope& scope) PURE; diff --git a/source/extensions/access_loggers/grpc/grpc_access_log_impl.cc b/source/extensions/access_loggers/grpc/grpc_access_log_impl.cc index 1e5c68f0f3732..e544a52af1911 100644 --- a/source/extensions/access_loggers/grpc/grpc_access_log_impl.cc +++ b/source/extensions/access_loggers/grpc/grpc_access_log_impl.cc @@ -18,15 +18,11 @@ namespace GrpcCommon { GrpcAccessLoggerImpl::GrpcAccessLoggerImpl( const Grpc::RawAsyncClientSharedPtr& client, std::string log_name, std::chrono::milliseconds buffer_flush_interval_msec, uint64_t max_buffer_size_bytes, - Event::Dispatcher& dispatcher, const LocalInfo::LocalInfo& local_info, Stats::Scope& scope, - envoy::config::core::v3::ApiVersion transport_api_version) - : GrpcAccessLogger( - std::move(client), buffer_flush_interval_msec, max_buffer_size_bytes, dispatcher, scope, - GRPC_LOG_STATS_PREFIX, - Grpc::VersionedMethods("envoy.service.accesslog.v3.AccessLogService.StreamAccessLogs", - "envoy.service.accesslog.v2.AccessLogService.StreamAccessLogs") - .getMethodDescriptorForVersion(transport_api_version), - transport_api_version), + Event::Dispatcher& dispatcher, const LocalInfo::LocalInfo& local_info, Stats::Scope& scope) + : GrpcAccessLogger(std::move(client), buffer_flush_interval_msec, max_buffer_size_bytes, + dispatcher, scope, GRPC_LOG_STATS_PREFIX, + *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( + "envoy.service.accesslog.v3.AccessLogService.StreamAccessLogs")), log_name_(log_name), local_info_(local_info) {} void 
GrpcAccessLoggerImpl::addEntry(envoy::data::accesslog::v3::HTTPAccessLogEntry&& entry) { @@ -55,13 +51,12 @@ GrpcAccessLoggerCacheImpl::GrpcAccessLoggerCacheImpl(Grpc::AsyncClientManager& a GrpcAccessLoggerImpl::SharedPtr GrpcAccessLoggerCacheImpl::createLogger( const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, - envoy::config::core::v3::ApiVersion transport_version, const Grpc::RawAsyncClientSharedPtr& client, std::chrono::milliseconds buffer_flush_interval_msec, uint64_t max_buffer_size_bytes, Event::Dispatcher& dispatcher, Stats::Scope& scope) { return std::make_shared(client, config.log_name(), buffer_flush_interval_msec, max_buffer_size_bytes, - dispatcher, local_info_, scope, transport_version); + dispatcher, local_info_, scope); } } // namespace GrpcCommon diff --git a/source/extensions/access_loggers/grpc/grpc_access_log_impl.h b/source/extensions/access_loggers/grpc/grpc_access_log_impl.h index 07bf8400fdc1c..43b5423274762 100644 --- a/source/extensions/access_loggers/grpc/grpc_access_log_impl.h +++ b/source/extensions/access_loggers/grpc/grpc_access_log_impl.h @@ -26,8 +26,7 @@ class GrpcAccessLoggerImpl GrpcAccessLoggerImpl(const Grpc::RawAsyncClientSharedPtr& client, std::string log_name, std::chrono::milliseconds buffer_flush_interval_msec, uint64_t max_buffer_size_bytes, Event::Dispatcher& dispatcher, - const LocalInfo::LocalInfo& local_info, Stats::Scope& scope, - envoy::config::core::v3::ApiVersion transport_api_version); + const LocalInfo::LocalInfo& local_info, Stats::Scope& scope); private: // Extensions::AccessLoggers::GrpcCommon::GrpcAccessLogger @@ -53,7 +52,6 @@ class GrpcAccessLoggerCacheImpl // Common::GrpcAccessLoggerCache GrpcAccessLoggerImpl::SharedPtr createLogger(const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, - envoy::config::core::v3::ApiVersion transport_version, const Grpc::RawAsyncClientSharedPtr& client, std::chrono::milliseconds buffer_flush_interval_msec, 
uint64_t max_buffer_size_bytes, Event::Dispatcher& dispatcher, Stats::Scope& scope) override; diff --git a/source/extensions/access_loggers/grpc/grpc_access_log_proto_descriptors.cc b/source/extensions/access_loggers/grpc/grpc_access_log_proto_descriptors.cc index e9346911355a0..28443fe69ea08 100644 --- a/source/extensions/access_loggers/grpc/grpc_access_log_proto_descriptors.cc +++ b/source/extensions/access_loggers/grpc/grpc_access_log_proto_descriptors.cc @@ -10,7 +10,7 @@ namespace AccessLoggers { namespace GrpcCommon { void validateProtoDescriptors() { - const auto method = "envoy.service.accesslog.v2.AccessLogService.StreamAccessLogs"; + const auto method = "envoy.service.accesslog.v3.AccessLogService.StreamAccessLogs"; RELEASE_ASSERT(Protobuf::DescriptorPool::generated_pool()->FindMethodByName(method) != nullptr, ""); diff --git a/source/extensions/access_loggers/grpc/grpc_access_log_utils.cc b/source/extensions/access_loggers/grpc/grpc_access_log_utils.cc index 64e9923aaf450..01ccb77d85fc6 100644 --- a/source/extensions/access_loggers/grpc/grpc_access_log_utils.cc +++ b/source/extensions/access_loggers/grpc/grpc_access_log_utils.cc @@ -166,10 +166,10 @@ void Utility::extractCommonAccessLogProperties( *stream_info.downstreamAddressProvider().localAddress(), *common_access_log.mutable_downstream_local_address()); } - if (stream_info.downstreamSslConnection() != nullptr) { + if (stream_info.downstreamAddressProvider().sslConnection() != nullptr) { auto* tls_properties = common_access_log.mutable_tls_properties(); const Ssl::ConnectionInfoConstSharedPtr downstream_ssl_connection = - stream_info.downstreamSslConnection(); + stream_info.downstreamAddressProvider().sslConnection(); tls_properties->set_tls_sni_hostname( std::string(stream_info.downstreamAddressProvider().requestedServerName())); diff --git a/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc b/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc index 
cb37f1315efd2..f35715c37ad98 100644 --- a/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc +++ b/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc @@ -41,11 +41,10 @@ HttpGrpcAccessLog::HttpGrpcAccessLog( for (const auto& header : config_.additional_response_trailers_to_log()) { response_trailers_to_log_.emplace_back(header); } - - tls_slot_->set([this, transport_version = Envoy::Config::Utility::getAndCheckTransportVersion( - config_.common_config())](Event::Dispatcher&) { + Envoy::Config::Utility::checkTransportVersion(config_.common_config()); + tls_slot_->set([this](Event::Dispatcher&) { return std::make_shared(access_logger_cache_->getOrCreateLogger( - config_.common_config(), transport_version, Common::GrpcAccessLoggerType::HTTP, scope_)); + config_.common_config(), Common::GrpcAccessLoggerType::HTTP, scope_)); }); } diff --git a/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.cc b/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.cc index c475f134a7b61..7fbcee911d5bc 100644 --- a/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.cc +++ b/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.cc @@ -24,10 +24,10 @@ TcpGrpcAccessLog::TcpGrpcAccessLog( Stats::Scope& scope) : Common::ImplBase(std::move(filter)), scope_(scope), config_(std::move(config)), tls_slot_(tls.allocateSlot()), access_logger_cache_(std::move(access_logger_cache)) { - tls_slot_->set([this, transport_version = Config::Utility::getAndCheckTransportVersion( - config_.common_config())](Event::Dispatcher&) { + Config::Utility::checkTransportVersion(config_.common_config()); + tls_slot_->set([this](Event::Dispatcher&) { return std::make_shared(access_logger_cache_->getOrCreateLogger( - config_.common_config(), transport_version, Common::GrpcAccessLoggerType::TCP, scope_)); + config_.common_config(), Common::GrpcAccessLoggerType::TCP, scope_)); }); } diff --git 
a/source/extensions/access_loggers/open_telemetry/access_log_impl.cc b/source/extensions/access_loggers/open_telemetry/access_log_impl.cc index f3376483ad318..48c4166395daa 100644 --- a/source/extensions/access_loggers/open_telemetry/access_log_impl.cc +++ b/source/extensions/access_loggers/open_telemetry/access_log_impl.cc @@ -40,11 +40,10 @@ AccessLog::AccessLog( : Common::ImplBase(std::move(filter)), scope_(scope), tls_slot_(tls.allocateSlot()), access_logger_cache_(std::move(access_logger_cache)) { - tls_slot_->set([this, config, - transport_version = Envoy::Config::Utility::getAndCheckTransportVersion( - config.common_config())](Event::Dispatcher&) { + Envoy::Config::Utility::checkTransportVersion(config.common_config()); + tls_slot_->set([this, config](Event::Dispatcher&) { return std::make_shared(access_logger_cache_->getOrCreateLogger( - config.common_config(), transport_version, Common::GrpcAccessLoggerType::HTTP, scope_)); + config.common_config(), Common::GrpcAccessLoggerType::HTTP, scope_)); }); ProtobufWkt::Struct body_format; diff --git a/source/extensions/access_loggers/open_telemetry/grpc_access_log_impl.cc b/source/extensions/access_loggers/open_telemetry/grpc_access_log_impl.cc index cc84242c93d67..215f7cfba9e4e 100644 --- a/source/extensions/access_loggers/open_telemetry/grpc_access_log_impl.cc +++ b/source/extensions/access_loggers/open_telemetry/grpc_access_log_impl.cc @@ -23,15 +23,11 @@ namespace OpenTelemetry { GrpcAccessLoggerImpl::GrpcAccessLoggerImpl( const Grpc::RawAsyncClientSharedPtr& client, std::string log_name, std::chrono::milliseconds buffer_flush_interval_msec, uint64_t max_buffer_size_bytes, - Event::Dispatcher& dispatcher, const LocalInfo::LocalInfo& local_info, Stats::Scope& scope, - envoy::config::core::v3::ApiVersion transport_api_version) - : GrpcAccessLogger( - client, buffer_flush_interval_msec, max_buffer_size_bytes, dispatcher, scope, - GRPC_LOG_STATS_PREFIX, - 
Grpc::VersionedMethods("opentelemetry.proto.collector.logs.v1.LogsService.Export", - "opentelemetry.proto.collector.logs.v1.LogsService.Export") - .getMethodDescriptorForVersion(transport_api_version), - transport_api_version) { + Event::Dispatcher& dispatcher, const LocalInfo::LocalInfo& local_info, Stats::Scope& scope) + : GrpcAccessLogger(client, buffer_flush_interval_msec, max_buffer_size_bytes, dispatcher, scope, + GRPC_LOG_STATS_PREFIX, + *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( + "opentelemetry.proto.collector.logs.v1.LogsService.Export")) { initMessageRoot(log_name, local_info); } @@ -78,13 +74,12 @@ GrpcAccessLoggerCacheImpl::GrpcAccessLoggerCacheImpl(Grpc::AsyncClientManager& a GrpcAccessLoggerImpl::SharedPtr GrpcAccessLoggerCacheImpl::createLogger( const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, - envoy::config::core::v3::ApiVersion transport_version, const Grpc::RawAsyncClientSharedPtr& client, std::chrono::milliseconds buffer_flush_interval_msec, uint64_t max_buffer_size_bytes, Event::Dispatcher& dispatcher, Stats::Scope& scope) { return std::make_shared(client, config.log_name(), buffer_flush_interval_msec, max_buffer_size_bytes, - dispatcher, local_info_, scope, transport_version); + dispatcher, local_info_, scope); } } // namespace OpenTelemetry diff --git a/source/extensions/access_loggers/open_telemetry/grpc_access_log_impl.h b/source/extensions/access_loggers/open_telemetry/grpc_access_log_impl.h index 52e61e134ee3b..7af83f529de4c 100644 --- a/source/extensions/access_loggers/open_telemetry/grpc_access_log_impl.h +++ b/source/extensions/access_loggers/open_telemetry/grpc_access_log_impl.h @@ -38,8 +38,7 @@ class GrpcAccessLoggerImpl GrpcAccessLoggerImpl(const Grpc::RawAsyncClientSharedPtr& client, std::string log_name, std::chrono::milliseconds buffer_flush_interval_msec, uint64_t max_buffer_size_bytes, Event::Dispatcher& dispatcher, - const LocalInfo::LocalInfo& local_info, 
Stats::Scope& scope, - envoy::config::core::v3::ApiVersion transport_api_version); + const LocalInfo::LocalInfo& local_info, Stats::Scope& scope); private: void initMessageRoot(const std::string& log_name, const LocalInfo::LocalInfo& local_info); @@ -67,7 +66,6 @@ class GrpcAccessLoggerCacheImpl // Common::GrpcAccessLoggerCache GrpcAccessLoggerImpl::SharedPtr createLogger(const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, - envoy::config::core::v3::ApiVersion transport_version, const Grpc::RawAsyncClientSharedPtr& client, std::chrono::milliseconds buffer_flush_interval_msec, uint64_t max_buffer_size_bytes, Event::Dispatcher& dispatcher, Stats::Scope& scope) override; diff --git a/source/extensions/clusters/dynamic_forward_proxy/cluster.cc b/source/extensions/clusters/dynamic_forward_proxy/cluster.cc index ef8d2c89c641a..cc72fc392466a 100644 --- a/source/extensions/clusters/dynamic_forward_proxy/cluster.cc +++ b/source/extensions/clusters/dynamic_forward_proxy/cluster.cc @@ -6,6 +6,7 @@ #include "source/common/network/transport_socket_options_impl.h" #include "source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h" +#include "source/extensions/filters/network/common/utility.h" namespace Envoy { namespace Extensions { @@ -176,18 +177,13 @@ ClusterFactory::createClusterWithConfig( Server::Configuration::TransportSocketFactoryContextImpl& socket_factory_context, Stats::ScopePtr&& stats_scope) { Extensions::Common::DynamicForwardProxy::DnsCacheManagerFactoryImpl cache_manager_factory( - context.singletonManager(), context.dispatcher(), context.tls(), - context.api().randomGenerator(), context.runtime(), context.stats()); + context.singletonManager(), context.dispatcher(), context.tls(), context.api(), + context.runtime(), context.stats(), context.messageValidationVisitor()); envoy::config::cluster::v3::Cluster cluster_config = cluster; - if (cluster_config.has_upstream_http_protocol_options()) { - if 
(!proto_config.allow_insecure_cluster_options() && - (!cluster_config.upstream_http_protocol_options().auto_sni() || - !cluster_config.upstream_http_protocol_options().auto_san_validation())) { - throw EnvoyException( - "dynamic_forward_proxy cluster must have auto_sni and auto_san_validation true when " - "configured with upstream_http_protocol_options"); - } - } else { + if (!cluster_config.has_upstream_http_protocol_options()) { + // This sets defaults which will only apply if using old style http config. + // They will be a no-op if typed_extension_protocol_options are used for + // http config. cluster_config.mutable_upstream_http_protocol_options()->set_auto_sni(true); cluster_config.mutable_upstream_http_protocol_options()->set_auto_san_validation(true); } @@ -195,6 +191,18 @@ ClusterFactory::createClusterWithConfig( auto new_cluster = std::make_shared( cluster_config, proto_config, context.runtime(), cache_manager_factory, context.localInfo(), socket_factory_context, std::move(stats_scope), context.addedViaApi()); + + auto& options = new_cluster->info()->upstreamHttpProtocolOptions(); + + if (!proto_config.allow_insecure_cluster_options()) { + if (!options.has_value() || + (!options.value().auto_sni() || !options.value().auto_san_validation())) { + throw EnvoyException( + "dynamic_forward_proxy cluster must have auto_sni and auto_san_validation true unless " + "allow_insecure_cluster_options is set."); + } + } + auto lb = std::make_unique(*new_cluster); return std::make_pair(new_cluster, std::move(lb)); } diff --git a/source/extensions/clusters/dynamic_forward_proxy/cluster.h b/source/extensions/clusters/dynamic_forward_proxy/cluster.h index dcd1e235607f5..e7266b97692e1 100644 --- a/source/extensions/clusters/dynamic_forward_proxy/cluster.h +++ b/source/extensions/clusters/dynamic_forward_proxy/cluster.h @@ -51,7 +51,8 @@ class Cluster : public Upstream::BaseDynamicClusterImpl, using HostInfoMap = absl::flat_hash_map; - struct LoadBalancer : public 
Upstream::LoadBalancer { + class LoadBalancer : public Upstream::LoadBalancer { + public: LoadBalancer(const Cluster& cluster) : cluster_(cluster) {} // Upstream::LoadBalancer @@ -61,19 +62,23 @@ class Cluster : public Upstream::BaseDynamicClusterImpl, return nullptr; } + private: const Cluster& cluster_; }; - struct LoadBalancerFactory : public Upstream::LoadBalancerFactory { + class LoadBalancerFactory : public Upstream::LoadBalancerFactory { + public: LoadBalancerFactory(Cluster& cluster) : cluster_(cluster) {} // Upstream::LoadBalancerFactory Upstream::LoadBalancerPtr create() override { return std::make_unique(cluster_); } + private: Cluster& cluster_; }; - struct ThreadAwareLoadBalancer : public Upstream::ThreadAwareLoadBalancer { + class ThreadAwareLoadBalancer : public Upstream::ThreadAwareLoadBalancer { + public: ThreadAwareLoadBalancer(Cluster& cluster) : cluster_(cluster) {} // Upstream::ThreadAwareLoadBalancer @@ -82,6 +87,7 @@ class Cluster : public Upstream::BaseDynamicClusterImpl, } void initialize() override {} + private: Cluster& cluster_; }; diff --git a/source/extensions/clusters/redis/redis_cluster.cc b/source/extensions/clusters/redis/redis_cluster.cc index 25ae1c8c7c66d..59d6f7ad0d3ca 100644 --- a/source/extensions/clusters/redis/redis_cluster.cc +++ b/source/extensions/clusters/redis/redis_cluster.cc @@ -94,19 +94,38 @@ void RedisCluster::onClusterSlotUpdate(ClusterSlotsPtr&& slots) { absl::flat_hash_set all_new_hosts; for (const ClusterSlot& slot : *slots) { - new_hosts.emplace_back(new RedisHost(info(), "", slot.primary(), *this, true, time_source_)); - all_new_hosts.emplace(slot.primary()->asString()); + if (all_new_hosts.count(slot.primary()->asString()) == 0) { + new_hosts.emplace_back(new RedisHost(info(), "", slot.primary(), *this, true, time_source_)); + all_new_hosts.emplace(slot.primary()->asString()); + } for (auto const& replica : slot.replicas()) { - new_hosts.emplace_back(new RedisHost(info(), "", replica.second, *this, false, 
time_source_)); - all_new_hosts.emplace(replica.first); + if (all_new_hosts.count(replica.first) == 0) { + new_hosts.emplace_back( + new RedisHost(info(), "", replica.second, *this, false, time_source_)); + all_new_hosts.emplace(replica.first); + } } } - Upstream::HostMap updated_hosts; + // Get the map of all the latest existing hosts, which is used to filter out the existing + // hosts in the process of updating cluster memberships. + Upstream::HostMapConstSharedPtr all_hosts = priority_set_.crossPriorityHostMap(); + ASSERT(all_hosts != nullptr); + Upstream::HostVector hosts_added; Upstream::HostVector hosts_removed; const bool host_updated = updateDynamicHostList(new_hosts, hosts_, hosts_added, hosts_removed, - updated_hosts, all_hosts_, all_new_hosts); + *all_hosts, all_new_hosts); + + // Create a map containing all the latest hosts to determine whether the slots are updated. + Upstream::HostMap updated_hosts = *all_hosts; + for (const auto& host : hosts_removed) { + updated_hosts.erase(host->address()->asString()); + } + for (const auto& host : hosts_added) { + updated_hosts[host->address()->asString()] = host; + } + const bool slot_updated = lb_factory_ ? lb_factory_->onClusterSlotUpdate(std::move(slots), updated_hosts) : false; @@ -121,8 +140,6 @@ void RedisCluster::onClusterSlotUpdate(ClusterSlotsPtr&& slots) { info_->stats().update_no_rebuild_.inc(); } - all_hosts_ = std::move(updated_hosts); - // TODO(hyang): If there is an initialize callback, fire it now. Note that if the // cluster refers to multiple DNS names, this will return initialized after a single // DNS resolution completes. 
This is not perfect but is easier to code and it is unclear diff --git a/source/extensions/clusters/redis/redis_cluster.h b/source/extensions/clusters/redis/redis_cluster.h index 96161c3514b11..ae8553b2effa9 100644 --- a/source/extensions/clusters/redis/redis_cluster.h +++ b/source/extensions/clusters/redis/redis_cluster.h @@ -276,7 +276,6 @@ class RedisCluster : public Upstream::BaseDynamicClusterImpl { const ClusterSlotUpdateCallBackSharedPtr lb_factory_; Upstream::HostVector hosts_; - Upstream::HostMap all_hosts_; const std::string auth_username_; const std::string auth_password_; diff --git a/source/extensions/clusters/redis/redis_cluster_lb.cc b/source/extensions/clusters/redis/redis_cluster_lb.cc index 25f38f448dae0..13e5af645efcc 100644 --- a/source/extensions/clusters/redis/redis_cluster_lb.cc +++ b/source/extensions/clusters/redis/redis_cluster_lb.cc @@ -18,7 +18,7 @@ bool ClusterSlot::operator==(const Envoy::Extensions::Clusters::Redis::ClusterSl // RedisClusterLoadBalancerFactory bool RedisClusterLoadBalancerFactory::onClusterSlotUpdate(ClusterSlotsPtr&& slots, - Envoy::Upstream::HostMap all_hosts) { + Envoy::Upstream::HostMap& all_hosts) { // The slots is sorted, allowing for a quick comparison to make sure we need to update the slot // array sort based on start and end to enable efficient comparison std::sort( diff --git a/source/extensions/clusters/redis/redis_cluster_lb.h b/source/extensions/clusters/redis/redis_cluster_lb.h index 4ac9f565e0b58..0edbfdedc93b6 100644 --- a/source/extensions/clusters/redis/redis_cluster_lb.h +++ b/source/extensions/clusters/redis/redis_cluster_lb.h @@ -109,7 +109,7 @@ class ClusterSlotUpdateCallBack { * @param all_hosts provides the updated hosts. * @return indicate if the cluster slot is updated or not. 
*/ - virtual bool onClusterSlotUpdate(ClusterSlotsPtr&& slots, Upstream::HostMap all_hosts) PURE; + virtual bool onClusterSlotUpdate(ClusterSlotsPtr&& slots, Upstream::HostMap& all_hosts) PURE; /** * Callback when a host's health status is updated @@ -129,7 +129,7 @@ class RedisClusterLoadBalancerFactory : public ClusterSlotUpdateCallBack, RedisClusterLoadBalancerFactory(Random::RandomGenerator& random) : random_(random) {} // ClusterSlotUpdateCallBack - bool onClusterSlotUpdate(ClusterSlotsPtr&& slots, Upstream::HostMap all_hosts) override; + bool onClusterSlotUpdate(ClusterSlotsPtr&& slots, Upstream::HostMap& all_hosts) override; void onHostHealthUpdate() override; diff --git a/source/extensions/common/aws/BUILD b/source/extensions/common/aws/BUILD index 2f79649e36a05..8cdd3fc95a72d 100644 --- a/source/extensions/common/aws/BUILD +++ b/source/extensions/common/aws/BUILD @@ -62,6 +62,7 @@ envoy_cc_library( hdrs = ["utility.h"], external_deps = ["curl"], deps = [ + "//source/common/common:empty_string", "//source/common/common:utility_lib", "//source/common/http:headers_lib", ], diff --git a/source/extensions/common/aws/signer_impl.cc b/source/extensions/common/aws/signer_impl.cc index 368285c91fb05..a071e29149671 100644 --- a/source/extensions/common/aws/signer_impl.cc +++ b/source/extensions/common/aws/signer_impl.cc @@ -59,7 +59,7 @@ void SignerImpl::sign(Http::RequestHeaderMap& headers, const std::string& conten // Phase 1: Create a canonical request const auto canonical_headers = Utility::canonicalizeHeaders(headers); const auto canonical_request = Utility::createCanonicalRequest( - method_header->value().getStringView(), path_header->value().getStringView(), + service_name_, method_header->value().getStringView(), path_header->value().getStringView(), canonical_headers, content_hash); ENVOY_LOG(debug, "Canonical request:\n{}", canonical_request); // Phase 2: Create a string to sign diff --git a/source/extensions/common/aws/utility.cc 
b/source/extensions/common/aws/utility.cc index 497e37f409628..a0ba42fe4eb89 100644 --- a/source/extensions/common/aws/utility.cc +++ b/source/extensions/common/aws/utility.cc @@ -1,9 +1,12 @@ #include "source/extensions/common/aws/utility.h" +#include "source/common/common/empty_string.h" #include "source/common/common/fmt.h" #include "source/common/common/utility.h" +#include "absl/strings/match.h" #include "absl/strings/str_join.h" +#include "absl/strings/str_split.h" #include "curl/curl.h" namespace Envoy { @@ -11,6 +14,15 @@ namespace Extensions { namespace Common { namespace Aws { +constexpr absl::string_view PATH_SPLITTER = "/"; +constexpr absl::string_view QUERY_PARAM_SEPERATOR = "="; +constexpr absl::string_view QUERY_SEPERATOR = "&"; +constexpr absl::string_view QUERY_SPLITTER = "?"; +constexpr absl::string_view RESERVED_CHARS = "-._~"; +constexpr absl::string_view S3_SERVICE_NAME = "s3"; +const std::string URI_ENCODE = "%{:02X}"; +const std::string URI_DOUBLE_ENCODE = "%25{:02X}"; + std::map Utility::canonicalizeHeaders(const Http::RequestHeaderMap& headers) { std::map out; @@ -58,18 +70,22 @@ Utility::canonicalizeHeaders(const Http::RequestHeaderMap& headers) { return out; } -std::string -Utility::createCanonicalRequest(absl::string_view method, absl::string_view path, - const std::map& canonical_headers, - absl::string_view content_hash) { +std::string Utility::createCanonicalRequest( + absl::string_view service_name, absl::string_view method, absl::string_view path, + const std::map& canonical_headers, absl::string_view content_hash) { std::vector parts; parts.emplace_back(method); // don't include the query part of the path - const auto path_part = StringUtil::cropRight(path, "?"); - parts.emplace_back(path_part.empty() ? "/" : path_part); - const auto query_part = StringUtil::cropLeft(path, "?"); + const auto path_part = StringUtil::cropRight(path, QUERY_SPLITTER); + const auto canonicalized_path = path_part.empty() + ? 
std::string{PATH_SPLITTER} + : canonicalizePathString(path_part, service_name); + parts.emplace_back(canonicalized_path); + const auto query_part = StringUtil::cropLeft(path, QUERY_SPLITTER); // if query_part == path_part, then there is no query - parts.emplace_back(query_part == path_part ? "" : query_part); + const auto canonicalized_query = + query_part == path_part ? EMPTY_STRING : Utility::canonicalizeQueryString(query_part); + parts.emplace_back(absl::string_view(canonicalized_query)); std::vector formatted_headers; formatted_headers.reserve(canonical_headers.size()); for (const auto& header : canonical_headers) { @@ -77,13 +93,120 @@ Utility::createCanonicalRequest(absl::string_view method, absl::string_view path parts.emplace_back(formatted_headers.back()); } // need an extra blank space after the canonical headers - parts.emplace_back(""); + parts.emplace_back(EMPTY_STRING); const auto signed_headers = Utility::joinCanonicalHeaderNames(canonical_headers); parts.emplace_back(signed_headers); parts.emplace_back(content_hash); return absl::StrJoin(parts, "\n"); } +/** + * Normalizes the path string based on AWS requirements. 
+ * See step 2 in https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html + */ +std::string Utility::canonicalizePathString(absl::string_view path_string, + absl::string_view service_name) { + // If service is S3, do not normalize but only encode the path + if (absl::EqualsIgnoreCase(service_name, S3_SERVICE_NAME)) { + return encodePathSegment(path_string, service_name); + } + // If service is not S3, normalize and encode the path + const auto path_segments = StringUtil::splitToken(path_string, std::string{PATH_SPLITTER}); + std::vector path_list; + path_list.reserve(path_segments.size()); + for (const auto& path_segment : path_segments) { + if (path_segment.empty()) { + continue; + } + path_list.emplace_back(encodePathSegment(path_segment, service_name)); + } + auto canonical_path_string = + fmt::format("{}{}", PATH_SPLITTER, absl::StrJoin(path_list, PATH_SPLITTER)); + // Handle corner case when path ends with '/' + if (absl::EndsWith(path_string, PATH_SPLITTER) && canonical_path_string.size() > 1) { + canonical_path_string.push_back(PATH_SPLITTER[0]); + } + return canonical_path_string; +} + +bool isReservedChar(const char c) { + return std::isalnum(c) || RESERVED_CHARS.find(c) != std::string::npos; +} + +void encodeS3Path(std::string& encoded, const char& c) { + // Do not encode '/' for S3 + if (c == PATH_SPLITTER[0]) { + encoded.push_back(c); + } else { + absl::StrAppend(&encoded, fmt::format(URI_ENCODE, c)); + } +} + +std::string Utility::encodePathSegment(absl::string_view decoded, absl::string_view service_name) { + std::string encoded; + for (char c : decoded) { + if (isReservedChar(c)) { + // Escape unreserved chars from RFC 3986 + encoded.push_back(c); + } else if (absl::EqualsIgnoreCase(service_name, S3_SERVICE_NAME)) { + encodeS3Path(encoded, c); + } else { + // TODO: @aws, There is some inconsistency between AWS services if this should be double + // encoded or not. We need to parameterize this and expose this in the config. 
Ref: + // https://github.com/aws/aws-sdk-cpp/blob/main/aws-cpp-sdk-core/source/auth/AWSAuthSigner.cpp#L79-L93 + absl::StrAppend(&encoded, fmt::format(URI_ENCODE, c)); + } + } + return encoded; +} + +/** + * Normalizes the query string based on AWS requirements. + * See step 3 in https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html + */ +std::string Utility::canonicalizeQueryString(absl::string_view query_string) { + // Sort query string based on param name and append "=" if value is missing + const auto query_fragments = StringUtil::splitToken(query_string, QUERY_SEPERATOR); + std::vector> query_list; + for (const auto& query_fragment : query_fragments) { + // Only split at the first "=" and encode the rest + const std::vector query = + absl::StrSplit(query_fragment, absl::MaxSplits(QUERY_PARAM_SEPERATOR, 1)); + if (!query.empty()) { + const absl::string_view param = query[0]; + const absl::string_view value = query.size() > 1 ? query[1] : EMPTY_STRING; + query_list.emplace_back(std::make_pair(param, value)); + } + } + // Sort query params by name and value + std::sort(query_list.begin(), query_list.end()); + // Encode query params name and value separately + for (auto& query : query_list) { + query = std::make_pair(Utility::encodeQueryParam(query.first), + Utility::encodeQueryParam(query.second)); + } + return absl::StrJoin(query_list, QUERY_SEPERATOR, absl::PairFormatter(QUERY_PARAM_SEPERATOR)); +} + +std::string Utility::encodeQueryParam(absl::string_view decoded) { + std::string encoded; + for (char c : decoded) { + if (isReservedChar(c) || c == '%') { + // Escape unreserved chars from RFC 3986 + encoded.push_back(c); + } else if (c == '+') { + // Encode '+' as space + absl::StrAppend(&encoded, "%20"); + } else if (c == QUERY_PARAM_SEPERATOR[0]) { + // Double encode '=' + absl::StrAppend(&encoded, fmt::format(URI_DOUBLE_ENCODE, c)); + } else { + absl::StrAppend(&encoded, fmt::format(URI_ENCODE, c)); + } + } + return encoded; +} + 
std::string Utility::joinCanonicalHeaderNames(const std::map& canonical_headers) { return absl::StrJoin(canonical_headers, ";", [](auto* out, const auto& pair) { diff --git a/source/extensions/common/aws/utility.h b/source/extensions/common/aws/utility.h index 79f9b937b8959..36b34da02a9f2 100644 --- a/source/extensions/common/aws/utility.h +++ b/source/extensions/common/aws/utility.h @@ -21,16 +21,51 @@ class Utility { /** * Creates an AWS Signature V4 canonical request string. * See https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html + * @param service_name the AWS service name. * @param method the HTTP request method. * @param path the request path. * @param canonical_headers the pre-canonicalized request headers. * @param content_hash the hashed request body. * @return the canonicalized request string. */ - static std::string - createCanonicalRequest(absl::string_view method, absl::string_view path, - const std::map& canonical_headers, - absl::string_view content_hash); + static std::string createCanonicalRequest( + absl::string_view service_name, absl::string_view method, absl::string_view path, + const std::map& canonical_headers, absl::string_view content_hash); + + /** + * Normalizes the path string based on AWS requirements. + * See step 2 in https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html + * @param query_string the query string from the HTTP request. + * @param service_name the AWS service name. + * @return the canonicalized query string. + */ + static std::string canonicalizePathString(absl::string_view path_string, + absl::string_view service_name); + + /** + * URI encodes the given string based on AWS requirements. + * See step 2 in https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html + * @param decoded the decoded string. + * @param service_name the AWS service name. + * @return the URI encoded string. 
+ */ + static std::string encodePathSegment(absl::string_view decoded, absl::string_view service_name); + + /** + * Normalizes the query string based on AWS requirements. + * See step 3 in https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html + * @param query_string the query string from the HTTP request. + * @return the canonicalized query string. + */ + static std::string canonicalizeQueryString(absl::string_view query_string); + + /** + * URI encodes the given string based on AWS requirements. + * See step 3 in https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html + * @param decoded the decoded string. + * @return the URI encoded string. + */ + static std::string encodeQueryParam(absl::string_view decoded); /** * Get the semicolon-delimited string of canonical header names. diff --git a/source/extensions/common/dynamic_forward_proxy/BUILD b/source/extensions/common/dynamic_forward_proxy/BUILD index 1fa99cbecef29..f7daecf1ab23b 100644 --- a/source/extensions/common/dynamic_forward_proxy/BUILD +++ b/source/extensions/common/dynamic_forward_proxy/BUILD @@ -43,6 +43,7 @@ envoy_cc_library( "//envoy/network:dns_interface", "//envoy/thread_local:thread_local_interface", "//source/common/common:cleanup_lib", + "//source/common/common:key_value_store_lib", "//source/common/config:utility_lib", "//source/common/network:resolver_lib", "//source/common/network:utility_lib", diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc index e4d8204687893..7a208b9f1398d 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc @@ -17,7 +17,8 @@ namespace DynamicForwardProxy { DnsCacheImpl::DnsCacheImpl( Event::Dispatcher& main_thread_dispatcher, ThreadLocal::SlotAllocator& tls, - Random::RandomGenerator& random, Runtime::Loader& loader, 
Stats::Scope& root_scope, + Random::RandomGenerator& random, Filesystem::Instance& file_system, Runtime::Loader& loader, + Stats::Scope& root_scope, ProtobufMessage::ValidationVisitor& validation_visitor, const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig& config) : main_thread_dispatcher_(main_thread_dispatcher), dns_lookup_family_(Upstream::getDnsLookupFamilyFromEnum(config.dns_lookup_family())), @@ -31,6 +32,7 @@ DnsCacheImpl::DnsCacheImpl( Config::Utility::prepareDnsRefreshStrategy< envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig>( config, refresh_interval_.count(), random)), + file_system_(file_system), validation_visitor_(validation_visitor), host_ttl_(PROTOBUF_GET_MS_OR_DEFAULT(config, host_ttl, 300000)), max_hosts_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, max_hosts, 1024)) { tls_slot_.set([&](Event::Dispatcher&) { return std::make_shared(*this); }); @@ -41,6 +43,8 @@ DnsCacheImpl::DnsCacheImpl( config.name(), config.preresolve_hostnames().size(), max_hosts_)); } + loadCacheEntries(config); + // Preresolved hostnames are resolved without a read lock on primary hosts because it is done // during object construction. for (const auto& hostname : config.preresolve_hostnames()) { @@ -252,6 +256,7 @@ void DnsCacheImpl::onReResolve(const std::string& host) { runRemoveCallbacks(host); } { + removeCacheEntry(host); absl::WriterMutexLock writer_lock{&primary_hosts_lock_}; auto host_it = primary_hosts_.find(host); ASSERT(host_it != primary_hosts_.end()); @@ -282,7 +287,7 @@ void DnsCacheImpl::startResolve(const std::string& host, PrimaryHostInfo& host_i void DnsCacheImpl::finishResolve(const std::string& host, Network::DnsResolver::ResolutionStatus status, - std::list&& response) { + std::list&& response, bool from_cache) { ASSERT(main_thread_dispatcher_.isThreadSafe()); ENVOY_LOG(debug, "main thread resolve complete for host '{}'. 
{} results", host, response.size()); @@ -323,6 +328,11 @@ void DnsCacheImpl::finishResolve(const std::string& host, bool address_changed = false; auto current_address = primary_host_info->host_info_->address(); if (new_address != nullptr && (current_address == nullptr || *current_address != *new_address)) { + if (!from_cache) { + addCacheEntry(host, new_address); + } + // TODO(alyssawilk) don't immediately push cached entries to threads. + // Only serve stale entries if a configured resolve timeout has fired. ENVOY_LOG(debug, "host '{}' address has changed", host); primary_host_info->host_info_->setAddress(new_address); runAddUpdateCallbacks(host, primary_host_info->host_info_); @@ -413,6 +423,50 @@ DnsCacheImpl::PrimaryHostInfo::~PrimaryHostInfo() { parent_.stats_.num_hosts_.dec(); } +void DnsCacheImpl::addCacheEntry(const std::string& host, + const Network::Address::InstanceConstSharedPtr& address) { + if (!key_value_store_) { + return; + } + // TODO(alyssawilk) cache data should include TTL, or some other indicator. 
+ const std::string value = absl::StrCat(address->asString()); + key_value_store_->addOrUpdate(host, value); +} + +void DnsCacheImpl::removeCacheEntry(const std::string& host) { + if (!key_value_store_) { + return; + } + key_value_store_->remove(host); +} + +void DnsCacheImpl::loadCacheEntries( + const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig& config) { + if (!config.has_key_value_config()) { + return; + } + auto& factory = + Config::Utility::getAndCheckFactory(config.key_value_config().config()); + key_value_store_ = factory.createStore(config.key_value_config(), validation_visitor_, + main_thread_dispatcher_, file_system_); + KeyValueStore::ConstIterateCb load = [this](const std::string& key, const std::string& value) { + auto address = Network::Utility::parseInternetAddressAndPortNoThrow(value); + if (address == nullptr) { + ENVOY_LOG(warn, "Unable to parse cache line '{}'", value); + return KeyValueStore::Iterate::Break; + } + stats_.cache_load_.inc(); + std::list response; + // TODO(alyssawilk) change finishResolve to actually use the TTL rather than + // putting 0 here, return the remaining TTL or indicate the result is stale. 
+ response.emplace_back(Network::DnsResponse(address, std::chrono::seconds(0) /* ttl */)); + startCacheLoad(key, address->ip()->port()); + finishResolve(key, Network::DnsResolver::ResolutionStatus::Success, std::move(response), true); + return KeyValueStore::Iterate::Continue; + }; + key_value_store_->iterate(load); +} + } // namespace DynamicForwardProxy } // namespace Common } // namespace Extensions diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h index 461b31ec1e424..28614a0181736 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h @@ -1,6 +1,7 @@ #pragma once #include "envoy/common/backoff_strategy.h" +#include "envoy/common/key_value_store.h" #include "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.pb.h" #include "envoy/http/filter.h" #include "envoy/network/dns.h" @@ -21,6 +22,7 @@ namespace DynamicForwardProxy { * All DNS cache stats. 
@see stats_macros.h */ #define ALL_DNS_CACHE_STATS(COUNTER, GAUGE) \ + COUNTER(cache_load) \ COUNTER(dns_query_attempt) \ COUNTER(dns_query_failure) \ COUNTER(dns_query_success) \ @@ -39,10 +41,14 @@ struct DnsCacheStats { ALL_DNS_CACHE_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT) }; +class DnsCacheImplTest; + class DnsCacheImpl : public DnsCache, Logger::Loggable { public: DnsCacheImpl(Event::Dispatcher& main_thread_dispatcher, ThreadLocal::SlotAllocator& tls, - Random::RandomGenerator& random, Runtime::Loader& loader, Stats::Scope& root_scope, + Random::RandomGenerator& random, Filesystem::Instance& file_system, + Runtime::Loader& loader, Stats::Scope& root_scope, + ProtobufMessage::ValidationVisitor& validation_visitor, const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig& config); ~DnsCacheImpl() override; static DnsCacheStats generateDnsCacheStats(Stats::Scope& scope); @@ -125,6 +131,7 @@ class DnsCacheImpl : public DnsCache, Logger::Loggable&& response); + std::list&& response, bool from_cache = false); void runAddUpdateCallbacks(const std::string& host, const DnsHostInfoSharedPtr& host_info); void runRemoveCallbacks(const std::string& host); void notifyThreads(const std::string& host, const DnsHostInfoImplSharedPtr& resolved_info); @@ -178,6 +185,12 @@ class DnsCacheImpl : public DnsCache, Logger::Loggable primary_hosts_ ABSL_GUARDED_BY(primary_hosts_lock_); + std::unique_ptr key_value_store_; DnsCacheResourceManagerImpl resource_manager_; const std::chrono::milliseconds refresh_interval_; const std::chrono::milliseconds timeout_interval_; const BackOffStrategyPtr failure_backoff_strategy_; + Filesystem::Instance& file_system_; + ProtobufMessage::ValidationVisitor& validation_visitor_; const std::chrono::milliseconds host_ttl_; const uint32_t max_hosts_; }; diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.cc b/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.cc index 
70d577df63d7a..7cb28f80e68de 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.cc +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.cc @@ -26,8 +26,9 @@ DnsCacheSharedPtr DnsCacheManagerImpl::getCache( return existing_cache->second.cache_; } - DnsCacheSharedPtr new_cache = std::make_shared( - main_thread_dispatcher_, tls_, random_, loader_, root_scope_, config); + DnsCacheSharedPtr new_cache = + std::make_shared(main_thread_dispatcher_, tls_, random_, file_system_, loader_, + root_scope_, validation_visitor_, config); caches_.emplace(config.name(), ActiveCache{config, new_cache}); return new_cache; } @@ -35,8 +36,8 @@ DnsCacheSharedPtr DnsCacheManagerImpl::getCache( DnsCacheManagerSharedPtr DnsCacheManagerFactoryImpl::get() { return singleton_manager_.getTyped( SINGLETON_MANAGER_REGISTERED_NAME(dns_cache_manager), [this] { - return std::make_shared(dispatcher_, tls_, random_, loader_, - root_scope_); + return std::make_shared(dispatcher_, tls_, random_, file_system_, + loader_, root_scope_, validation_visitor_); }); } diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h b/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h index 553f45e22bacd..4b27404366a8c 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h @@ -14,10 +14,12 @@ namespace DynamicForwardProxy { class DnsCacheManagerImpl : public DnsCacheManager, public Singleton::Instance { public: DnsCacheManagerImpl(Event::Dispatcher& main_thread_dispatcher, ThreadLocal::SlotAllocator& tls, - Random::RandomGenerator& random, Runtime::Loader& loader, - Stats::Scope& root_scope) + Random::RandomGenerator& random, Filesystem::Instance& file_system, + Runtime::Loader& loader, Stats::Scope& root_scope, + ProtobufMessage::ValidationVisitor& validation_visitor) : 
main_thread_dispatcher_(main_thread_dispatcher), tls_(tls), random_(random), - loader_(loader), root_scope_(root_scope) {} + file_system_(file_system), loader_(loader), root_scope_(root_scope), + validation_visitor_(validation_visitor) {} // DnsCacheManager DnsCacheSharedPtr getCache( @@ -36,18 +38,23 @@ class DnsCacheManagerImpl : public DnsCacheManager, public Singleton::Instance { Event::Dispatcher& main_thread_dispatcher_; ThreadLocal::SlotAllocator& tls_; Random::RandomGenerator& random_; + Filesystem::Instance& file_system_; Runtime::Loader& loader_; Stats::Scope& root_scope_; + ProtobufMessage::ValidationVisitor& validation_visitor_; + absl::flat_hash_map caches_; }; class DnsCacheManagerFactoryImpl : public DnsCacheManagerFactory { public: DnsCacheManagerFactoryImpl(Singleton::Manager& singleton_manager, Event::Dispatcher& dispatcher, - ThreadLocal::SlotAllocator& tls, Random::RandomGenerator& random, - Runtime::Loader& loader, Stats::Scope& root_scope) - : singleton_manager_(singleton_manager), dispatcher_(dispatcher), tls_(tls), random_(random), - loader_(loader), root_scope_(root_scope) {} + ThreadLocal::SlotAllocator& tls, Api::Api& api, + Runtime::Loader& loader, Stats::Scope& root_scope, + ProtobufMessage::ValidationVisitor& validation_visitor) + : singleton_manager_(singleton_manager), dispatcher_(dispatcher), tls_(tls), + random_(api.randomGenerator()), file_system_(api.fileSystem()), loader_(loader), + root_scope_(root_scope), validation_visitor_(validation_visitor) {} DnsCacheManagerSharedPtr get() override; @@ -56,8 +63,10 @@ class DnsCacheManagerFactoryImpl : public DnsCacheManagerFactory { Event::Dispatcher& dispatcher_; ThreadLocal::SlotAllocator& tls_; Random::RandomGenerator& random_; + Filesystem::Instance& file_system_; Runtime::Loader& loader_; Stats::Scope& root_scope_; + ProtobufMessage::ValidationVisitor& validation_visitor_; }; } // namespace DynamicForwardProxy diff --git 
a/source/extensions/common/proxy_protocol/proxy_protocol_header.cc b/source/extensions/common/proxy_protocol/proxy_protocol_header.cc index 3296b0e14044c..025ced0dd606d 100644 --- a/source/extensions/common/proxy_protocol/proxy_protocol_header.cc +++ b/source/extensions/common/proxy_protocol/proxy_protocol_header.cc @@ -101,8 +101,10 @@ void generateV2Header(const Network::Address::Ip& source_address, void generateProxyProtoHeader(const envoy::config::core::v3::ProxyProtocolConfig& config, const Network::Connection& connection, Buffer::Instance& out) { - const Network::Address::Ip& dest_address = *connection.addressProvider().localAddress()->ip(); - const Network::Address::Ip& source_address = *connection.addressProvider().remoteAddress()->ip(); + const Network::Address::Ip& dest_address = + *connection.connectionInfoProvider().localAddress()->ip(); + const Network::Address::Ip& source_address = + *connection.connectionInfoProvider().remoteAddress()->ip(); if (config.version() == envoy::config::core::v3::ProxyProtocolConfig::V1) { generateV1Header(source_address, dest_address, out); } else if (config.version() == envoy::config::core::v3::ProxyProtocolConfig::V2) { diff --git a/source/extensions/common/tap/tap_config_base.cc b/source/extensions/common/tap/tap_config_base.cc index b1f9785c65888..f2d733c21c00f 100644 --- a/source/extensions/common/tap/tap_config_base.cc +++ b/source/extensions/common/tap/tap_config_base.cc @@ -6,7 +6,6 @@ #include "source/common/common/assert.h" #include "source/common/common/fmt.h" -#include "source/common/config/version_converter.h" #include "source/common/protobuf/utility.h" #include "source/extensions/common/matcher/matcher.h" @@ -83,7 +82,7 @@ TapConfigBaseImpl::TapConfigBaseImpl(const envoy::config::tap::v3::TapConfig& pr // Fallback to use the deprecated match_config field and upgrade (wire cast) it to the new // MatchPredicate which is backward compatible with the old MatchPredicate originally // introduced in the Tap filter. 
- Config::VersionConverter::upgrade(proto_config.match_config(), match); + MessageUtil::wireCast(proto_config.match_config(), match); } else { throw EnvoyException(fmt::format("Neither match nor match_config is set in TapConfig: {}", proto_config.DebugString())); diff --git a/source/extensions/common/wasm/context.cc b/source/extensions/common/wasm/context.cc index 7583c30f1d67e..41cd1b7a84d81 100644 --- a/source/extensions/common/wasm/context.cc +++ b/source/extensions/common/wasm/context.cc @@ -524,8 +524,8 @@ Context::findValue(absl::string_view name, Protobuf::Arena* arena, bool last) co case PropertyToken::CLUSTER_NAME: if (info && info->upstreamHost()) { return CelValue::CreateString(&info->upstreamHost()->cluster().name()); - } else if (info && info->routeEntry()) { - return CelValue::CreateString(&info->routeEntry()->clusterName()); + } else if (info && info->route() && info->route()->routeEntry()) { + return CelValue::CreateString(&info->route()->routeEntry()->clusterName()); } else if (info && info->upstreamClusterInfo().has_value() && info->upstreamClusterInfo().value()) { return CelValue::CreateString(&info->upstreamClusterInfo().value()->name()); @@ -551,8 +551,8 @@ Context::findValue(absl::string_view name, Protobuf::Arena* arena, bool last) co } break; case PropertyToken::ROUTE_METADATA: - if (info && info->routeEntry()) { - return CelProtoWrapper::CreateMessage(&info->routeEntry()->metadata(), arena); + if (info && info->route()) { + return CelProtoWrapper::CreateMessage(&info->route()->metadata(), arena); } break; case PropertyToken::PLUGIN_NAME: @@ -910,21 +910,6 @@ void Context::onUpstreamConnectionClose(CloseType close_type) { } } -uint32_t Context::nextHttpCallToken() { - uint32_t token = next_http_call_token_++; - // Handle rollover. 
- for (;;) { - if (token == 0) { - token = next_http_call_token_++; - } - if (!http_request_.count(token)) { - break; - } - token = next_http_call_token_++; - } - return token; -} - // Async call via HTTP WasmResult Context::httpCall(std::string_view cluster, const Pairs& request_headers, std::string_view request_body, const Pairs& request_trailers, @@ -961,7 +946,7 @@ WasmResult Context::httpCall(std::string_view cluster, const Pairs& request_head timeout = std::chrono::milliseconds(timeout_milliseconds); } - uint32_t token = nextHttpCallToken(); + uint32_t token = wasm()->nextHttpCallId(); auto& handler = http_request_[token]; handler.context_ = this; handler.token_ = token; @@ -983,22 +968,6 @@ WasmResult Context::httpCall(std::string_view cluster, const Pairs& request_head return WasmResult::Ok; } -uint32_t Context::nextGrpcCallToken() { - uint32_t token = next_grpc_token_++; - if (isGrpcStreamToken(token)) { - token = next_grpc_token_++; - } - // Handle rollover. Note: token is always odd. - for (;;) { - if (!grpc_call_request_.count(token)) { - break; - } - next_grpc_token_++; // Skip stream token. 
- token = next_grpc_token_++; - } - return token; -} - WasmResult Context::grpcCall(std::string_view grpc_service, std::string_view service_name, std::string_view method_name, const Pairs& initial_metadata, std::string_view request, std::chrono::milliseconds timeout, @@ -1019,7 +988,7 @@ WasmResult Context::grpcCall(std::string_view grpc_service, std::string_view ser return WasmResult::ParseFailure; } } - uint32_t token = nextGrpcCallToken(); + uint32_t token = wasm()->nextGrpcCallId(); auto& handler = grpc_call_request_[token]; handler.context_ = this; handler.token_ = token; @@ -1049,26 +1018,6 @@ WasmResult Context::grpcCall(std::string_view grpc_service, std::string_view ser return WasmResult::Ok; } -uint32_t Context::nextGrpcStreamToken() { - uint32_t token = next_grpc_token_++; - if (isGrpcCallToken(token)) { - token = next_grpc_token_++; - } - // Handle rollover. Note: token is always even. - for (;;) { - if (token == 0) { - next_grpc_token_++; // Skip call token. - token = next_grpc_token_++; - } - if (!grpc_stream_.count(token)) { - break; - } - next_grpc_token_++; // Skip call token. 
- token = next_grpc_token_++; - } - return token; -} - WasmResult Context::grpcStream(std::string_view grpc_service, std::string_view service_name, std::string_view method_name, const Pairs& initial_metadata, uint32_t* token_ptr) { @@ -1088,7 +1037,7 @@ WasmResult Context::grpcStream(std::string_view grpc_service, std::string_view s return WasmResult::ParseFailure; } } - uint32_t token = nextGrpcStreamToken(); + uint32_t token = wasm()->nextGrpcStreamId(); auto& handler = grpc_stream_[token]; handler.context_ = this; handler.token_ = token; @@ -1879,7 +1828,7 @@ void Context::onGrpcReceiveWrapper(uint32_t token, ::Envoy::Buffer::InstancePtr ContextBase::onGrpcReceive(token, response_size); grpc_receive_buffer_.reset(); } - if (isGrpcCallToken(token)) { + if (wasm()->isGrpcCallId(token)) { grpc_call_request_.erase(token); } } @@ -1899,9 +1848,9 @@ void Context::onGrpcCloseWrapper(uint32_t token, const Grpc::Status::GrpcStatus& onGrpcClose(token, status_code_); status_message_ = ""; } - if (isGrpcCallToken(token)) { + if (wasm()->isGrpcCallId(token)) { grpc_call_request_.erase(token); - } else { + } else if (wasm()->isGrpcStreamId(token)) { auto it = grpc_stream_.find(token); if (it != grpc_stream_.end()) { if (it->second.local_closed_) { @@ -1912,7 +1861,7 @@ void Context::onGrpcCloseWrapper(uint32_t token, const Grpc::Status::GrpcStatus& } WasmResult Context::grpcSend(uint32_t token, std::string_view message, bool end_stream) { - if (isGrpcCallToken(token)) { + if (!wasm()->isGrpcStreamId(token)) { return WasmResult::BadArgument; } auto it = grpc_stream_.find(token); @@ -1928,7 +1877,7 @@ WasmResult Context::grpcSend(uint32_t token, std::string_view message, bool end_ } WasmResult Context::grpcClose(uint32_t token) { - if (isGrpcCallToken(token)) { + if (wasm()->isGrpcCallId(token)) { auto it = grpc_call_request_.find(token); if (it == grpc_call_request_.end()) { return WasmResult::NotFound; @@ -1937,7 +1886,8 @@ WasmResult Context::grpcClose(uint32_t token) { 
it->second.request_->cancel(); } grpc_call_request_.erase(token); - } else { + return WasmResult::Ok; + } else if (wasm()->isGrpcStreamId(token)) { auto it = grpc_stream_.find(token); if (it == grpc_stream_.end()) { return WasmResult::NotFound; @@ -1950,12 +1900,13 @@ WasmResult Context::grpcClose(uint32_t token) { } else { it->second.local_closed_ = true; } + return WasmResult::Ok; } - return WasmResult::Ok; + return WasmResult::BadArgument; } WasmResult Context::grpcCancel(uint32_t token) { - if (isGrpcCallToken(token)) { + if (wasm()->isGrpcCallId(token)) { auto it = grpc_call_request_.find(token); if (it == grpc_call_request_.end()) { return WasmResult::NotFound; @@ -1964,7 +1915,8 @@ WasmResult Context::grpcCancel(uint32_t token) { it->second.request_->cancel(); } grpc_call_request_.erase(token); - } else { + return WasmResult::Ok; + } else if (wasm()->isGrpcStreamId(token)) { auto it = grpc_stream_.find(token); if (it == grpc_stream_.end()) { return WasmResult::NotFound; @@ -1973,8 +1925,9 @@ WasmResult Context::grpcCancel(uint32_t token) { it->second.stream_->resetStream(); } grpc_stream_.erase(token); + return WasmResult::Ok; } - return WasmResult::Ok; + return WasmResult::BadArgument; } } // namespace Wasm diff --git a/source/extensions/common/wasm/context.h b/source/extensions/common/wasm/context.h index 4882d7a2eea7f..b163c96c9edae 100644 --- a/source/extensions/common/wasm/context.h +++ b/source/extensions/common/wasm/context.h @@ -304,12 +304,6 @@ class Context : public proxy_wasm::ContextBase, return dynamic_cast(it->second.get()); } - uint32_t nextGrpcCallToken(); - uint32_t nextGrpcStreamToken(); - uint32_t nextHttpCallToken(); - void setNextGrpcTokenForTesting(uint32_t token) { next_grpc_token_ = token; } - void setNextHttpCallTokenForTesting(uint32_t token) { next_http_call_token_ = token; } - protected: friend class Wasm; @@ -392,9 +386,6 @@ class Context : public proxy_wasm::ContextBase, void onGrpcCloseWrapper(uint32_t token, const 
Grpc::Status::GrpcStatus& status, const std::string_view message); - bool isGrpcStreamToken(uint32_t token) { return (token & 1) == 0; } - bool isGrpcCallToken(uint32_t token) { return (token & 1) == 1; } - Http::HeaderMap* getMap(WasmHeaderMapType type); const Http::HeaderMap* getConstMap(WasmHeaderMapType type); diff --git a/source/extensions/common/wasm/ext/envoy_null_plugin.h b/source/extensions/common/wasm/ext/envoy_null_plugin.h index 49544355f4623..99a4963a41677 100644 --- a/source/extensions/common/wasm/ext/envoy_null_plugin.h +++ b/source/extensions/common/wasm/ext/envoy_null_plugin.h @@ -15,8 +15,8 @@ namespace Extensions { namespace Common { namespace Wasm { -proxy_wasm::Word resolve_dns(void* raw_context, proxy_wasm::Word dns_address, - proxy_wasm::Word dns_address_size, proxy_wasm::Word token_ptr); +proxy_wasm::Word resolve_dns(proxy_wasm::Word dns_address, proxy_wasm::Word dns_address_size, + proxy_wasm::Word token_ptr); } // namespace Wasm } // namespace Common @@ -36,7 +36,7 @@ using namespace proxy_wasm::null_plugin; inline WasmResult envoy_resolve_dns(const char* dns_address, size_t dns_address_size, uint32_t* token) { return static_cast( - ::Envoy::Extensions::Common::Wasm::resolve_dns(proxy_wasm::current_context_, WR(dns_address), + ::Envoy::Extensions::Common::Wasm::resolve_dns(WR(dns_address), WS(dns_address_size), WR(token)) .u64_); } diff --git a/source/extensions/common/wasm/wasm.cc b/source/extensions/common/wasm/wasm.cc index a126cac544c65..290890f83dd4b 100644 --- a/source/extensions/common/wasm/wasm.cc +++ b/source/extensions/common/wasm/wasm.cc @@ -11,10 +11,6 @@ #include "absl/strings/str_cat.h" -#define WASM_CONTEXT(_c) \ - static_cast(proxy_wasm::exports::ContextOrEffectiveContext( \ - static_cast((void)_c, proxy_wasm::current_context_))) - using proxy_wasm::FailState; using proxy_wasm::Word; @@ -152,8 +148,8 @@ Wasm::~Wasm() { } // NOLINTNEXTLINE(readability-identifier-naming) -Word resolve_dns(void* raw_context, Word 
dns_address_ptr, Word dns_address_size, Word token_ptr) { - auto context = WASM_CONTEXT(raw_context); +Word resolve_dns(Word dns_address_ptr, Word dns_address_size, Word token_ptr) { + auto context = static_cast(proxy_wasm::contextOrEffectiveContext()); auto root_context = context->isRootContext() ? context : context->rootContext(); auto address = context->wasmVm()->getMemory(dns_address_ptr, dns_address_size); if (!address) { diff --git a/source/extensions/extensions_build_config.bzl b/source/extensions/extensions_build_config.bzl index 7ea4c9c24e1f0..907828f07f8c6 100644 --- a/source/extensions/extensions_build_config.bzl +++ b/source/extensions/extensions_build_config.bzl @@ -103,7 +103,6 @@ EXTENSIONS = { "envoy.filters.http.rbac": "//source/extensions/filters/http/rbac:config", "envoy.filters.http.router": "//source/extensions/filters/http/router:config", "envoy.filters.http.set_metadata": "//source/extensions/filters/http/set_metadata:config", - "envoy.filters.http.squash": "//source/extensions/filters/http/squash:config", "envoy.filters.http.tap": "//source/extensions/filters/http/tap:config", "envoy.filters.http.wasm": "//source/extensions/filters/http/wasm:config", @@ -132,15 +131,11 @@ EXTENSIONS = { "envoy.filters.network.echo": "//source/extensions/filters/network/echo:config", "envoy.filters.network.ext_authz": "//source/extensions/filters/network/ext_authz:config", "envoy.filters.network.http_connection_manager": "//source/extensions/filters/network/http_connection_manager:config", - "envoy.filters.network.kafka_broker": "//source/extensions/filters/network/kafka:kafka_broker_config_lib", "envoy.filters.network.local_ratelimit": "//source/extensions/filters/network/local_ratelimit:config", "envoy.filters.network.mongo_proxy": "//source/extensions/filters/network/mongo_proxy:config", - "envoy.filters.network.mysql_proxy": "//source/extensions/filters/network/mysql_proxy:config", - "envoy.filters.network.postgres_proxy": 
"//source/extensions/filters/network/postgres_proxy:config", "envoy.filters.network.ratelimit": "//source/extensions/filters/network/ratelimit:config", "envoy.filters.network.rbac": "//source/extensions/filters/network/rbac:config", "envoy.filters.network.redis_proxy": "//source/extensions/filters/network/redis_proxy:config", - "envoy.filters.network.rocketmq_proxy": "//source/extensions/filters/network/rocketmq_proxy:config", "envoy.filters.network.tcp_proxy": "//source/extensions/filters/network/tcp_proxy:config", "envoy.filters.network.thrift_proxy": "//source/extensions/filters/network/thrift_proxy:config", "envoy.filters.network.sni_cluster": "//source/extensions/filters/network/sni_cluster:config", @@ -294,7 +289,14 @@ EXTENSIONS = { # Formatter # + "envoy.formatter.metadata": "//source/extensions/formatter/metadata:config", "envoy.formatter.req_without_query": "//source/extensions/formatter/req_without_query:config", + + # + # Key value store + # + + "envoy.key_value.file_based": "//source/extensions/key_value/file_based:config_lib", } # These can be changed to ["//visibility:public"], for downstream builds which diff --git a/source/extensions/extensions_metadata.yaml b/source/extensions/extensions_metadata.yaml index 5dda2861d1292..c8ad920b51e17 100644 --- a/source/extensions/extensions_metadata.yaml +++ b/source/extensions/extensions_metadata.yaml @@ -263,11 +263,6 @@ envoy.filters.http.set_metadata: - envoy.filters.http security_posture: robust_to_untrusted_downstream_and_upstream status: stable -envoy.filters.http.squash: - categories: - - envoy.filters.http - security_posture: requires_trusted_downstream_and_upstream - status: stable envoy.filters.http.tap: categories: - envoy.filters.http @@ -343,11 +338,6 @@ envoy.filters.network.envoy_mobile_http_connection_manager: - envoy.filters.network security_posture: robust_to_untrusted_downstream status: stable -envoy.filters.network.kafka_broker: - categories: - - envoy.filters.network - security_posture: 
requires_trusted_downstream_and_upstream - status: wip envoy.filters.network.local_ratelimit: categories: - envoy.filters.network @@ -358,16 +348,6 @@ envoy.filters.network.mongo_proxy: - envoy.filters.network security_posture: requires_trusted_downstream_and_upstream status: stable -envoy.filters.network.mysql_proxy: - categories: - - envoy.filters.network - security_posture: requires_trusted_downstream_and_upstream - status: alpha -envoy.filters.network.postgres_proxy: - categories: - - envoy.filters.network - security_posture: requires_trusted_downstream_and_upstream - status: stable envoy.filters.network.ratelimit: categories: - envoy.filters.network @@ -383,11 +363,6 @@ envoy.filters.network.redis_proxy: - envoy.filters.network security_posture: requires_trusted_downstream_and_upstream status: stable -envoy.filters.network.rocketmq_proxy: - categories: - - envoy.filters.network - security_posture: requires_trusted_downstream_and_upstream - status: alpha envoy.filters.network.sni_cluster: categories: - envoy.filters.network @@ -438,6 +413,11 @@ envoy.filters.udp_listener.udp_proxy: - envoy.filters.udp_listener security_posture: robust_to_untrusted_downstream status: stable +envoy.formatter.metadata: + categories: + - envoy.formatter + security_posture: robust_to_untrusted_downstream_and_upstream + status: alpha envoy.formatter.req_without_query: categories: - envoy.formatter @@ -592,8 +572,8 @@ envoy.stat_sinks.wasm: envoy.tls.cert_validator.spiffe: categories: - envoy.tls.cert_validator - security_posture: unknown - status: wip + security_posture: requires_trusted_downstream_and_upstream + status: alpha envoy.tracers.datadog: categories: - envoy.tracers @@ -719,3 +699,8 @@ envoy.watchdog.profile_action: - envoy.guarddog_actions security_posture: data_plane_agnostic status: alpha +envoy.key_value.file_based: + categories: + - envoy.common.key_value + security_posture: data_plane_agnostic + status: alpha diff --git 
a/source/extensions/filters/common/expr/context.cc b/source/extensions/filters/common/expr/context.cc index 934f78728bcd4..ac0a47bd98d33 100644 --- a/source/extensions/filters/common/expr/context.cc +++ b/source/extensions/filters/common/expr/context.cc @@ -181,8 +181,9 @@ absl::optional ConnectionWrapper::operator[](CelValue key) const { } auto value = key.StringOrDie().value(); if (value == MTLS) { - return CelValue::CreateBool(info_.downstreamSslConnection() != nullptr && - info_.downstreamSslConnection()->peerCertificatePresented()); + return CelValue::CreateBool( + info_.downstreamAddressProvider().sslConnection() != nullptr && + info_.downstreamAddressProvider().sslConnection()->peerCertificatePresented()); } else if (value == RequestedServerName) { return CelValue::CreateStringView(info_.downstreamAddressProvider().requestedServerName()); } else if (value == ID) { @@ -198,7 +199,7 @@ absl::optional ConnectionWrapper::operator[](CelValue key) const { return {}; } - auto ssl_info = info_.downstreamSslConnection(); + auto ssl_info = info_.downstreamAddressProvider().sslConnection(); if (ssl_info != nullptr) { return extractSslInfo(*ssl_info, value); } diff --git a/source/extensions/filters/common/ext_authz/check_request_utils.cc b/source/extensions/filters/common/ext_authz/check_request_utils.cc index 4b9614e4434c4..157f23e6fb142 100644 --- a/source/extensions/filters/common/ext_authz/check_request_utils.cc +++ b/source/extensions/filters/common/ext_authz/check_request_utils.cc @@ -38,11 +38,11 @@ void CheckRequestUtils::setAttrContextPeer(envoy::service::auth::v3::AttributeCo // Set the address auto addr = peer.mutable_address(); if (local) { - Envoy::Network::Utility::addressToProtobufAddress(*connection.addressProvider().localAddress(), - *addr); + Envoy::Network::Utility::addressToProtobufAddress( + *connection.connectionInfoProvider().localAddress(), *addr); } else { - 
Envoy::Network::Utility::addressToProtobufAddress(*connection.addressProvider().remoteAddress(), - *addr); + Envoy::Network::Utility::addressToProtobufAddress( + *connection.connectionInfoProvider().remoteAddress(), *addr); } // Set the principal. Preferably the URI SAN, DNS SAN or Subject in that order from the peer's @@ -122,8 +122,13 @@ void CheckRequestUtils::setHttpRequest( headers.iterate([mutable_headers](const Envoy::Http::HeaderEntry& e) { // Skip any client EnvoyAuthPartialBody header, which could interfere with internal use. if (e.key().getStringView() != Headers::get().EnvoyAuthPartialBody.get()) { - (*mutable_headers)[std::string(e.key().getStringView())] = - std::string(e.value().getStringView()); + const std::string key(e.key().getStringView()); + if (mutable_headers->find(key) == mutable_headers->end()) { + (*mutable_headers)[key] = std::string(e.value().getStringView()); + } else { + // Merge duplicate headers. + (*mutable_headers)[key].append(",").append(std::string(e.value().getStringView())); + } } return Envoy::Http::HeaderMap::Iterate::Continue; }); diff --git a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc index b3b62d7dd9049..d462a5179572b 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc +++ b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc @@ -17,13 +17,10 @@ namespace Common { namespace ExtAuthz { GrpcClientImpl::GrpcClientImpl(const Grpc::RawAsyncClientSharedPtr& async_client, - const absl::optional& timeout, - envoy::config::core::v3::ApiVersion transport_api_version) + const absl::optional& timeout) : async_client_(async_client), timeout_(timeout), - service_method_(Grpc::VersionedMethods("envoy.service.auth.v3.Authorization.Check", - "envoy.service.auth.v2.Authorization.Check") - .getMethodDescriptorForVersion(transport_api_version)), - transport_api_version_(transport_api_version) {} + 
service_method_(*Protobuf::DescriptorPool::generated_pool()->FindMethodByName( + "envoy.service.auth.v3.Authorization.Check")) {} GrpcClientImpl::~GrpcClientImpl() { ASSERT(!callbacks_); } @@ -43,8 +40,7 @@ void GrpcClientImpl::check(RequestCallbacks& callbacks, options.setParentContext(Http::AsyncClient::ParentContext{&stream_info}); ENVOY_LOG(trace, "Sending CheckRequest: {}", request.DebugString()); - request_ = async_client_->send(service_method_, request, *this, parent_span, options, - transport_api_version_); + request_ = async_client_->send(service_method_, request, *this, parent_span, options); } void GrpcClientImpl::onSuccess(std::unique_ptr&& response, diff --git a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.h b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.h index 48f9498dfbf74..f13d6bd6d33cf 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.h +++ b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.h @@ -44,8 +44,7 @@ class GrpcClientImpl : public Client, public Logger::Loggable { public: GrpcClientImpl(const Grpc::RawAsyncClientSharedPtr& async_client, - const absl::optional& timeout, - envoy::config::core::v3::ApiVersion transport_api_version); + const absl::optional& timeout); ~GrpcClientImpl() override; // ExtAuthz::Client @@ -70,7 +69,6 @@ class GrpcClientImpl : public Client, absl::optional timeout_; RequestCallbacks* callbacks_{}; const Protobuf::MethodDescriptor& service_method_; - const envoy::config::core::v3::ApiVersion transport_api_version_; }; using GrpcClientImplPtr = std::unique_ptr; diff --git a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc index f80b3022080a0..4dba952fede7d 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc +++ b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc @@ -86,7 +86,9 @@ std::vector 
createStringMatchers(const envoy::type::matcher::v3::ListStringMatcher& list) { std::vector matchers; for (const auto& matcher : list.patterns()) { - matchers.push_back(std::make_unique(matcher)); + matchers.push_back( + std::make_unique>( + matcher)); } return matchers; } @@ -136,7 +138,9 @@ ClientConfig::toRequestMatchers(const envoy::type::matcher::v3::ListStringMatche for (const auto& key : keys) { envoy::type::matcher::v3::StringMatcher matcher; matcher.set_exact(key.get()); - matchers.push_back(std::make_unique(matcher)); + matchers.push_back( + std::make_unique>( + matcher)); } return std::make_shared(std::move(matchers)); @@ -157,7 +161,9 @@ ClientConfig::toClientMatchers(const envoy::type::matcher::v3::ListStringMatcher if (matchers.empty()) { envoy::type::matcher::v3::StringMatcher matcher; matcher.set_exact(Http::Headers::get().Host.get()); - matchers.push_back(std::make_unique(matcher)); + matchers.push_back( + std::make_unique>( + matcher)); return std::make_shared(std::move(matchers)); } @@ -171,7 +177,9 @@ ClientConfig::toClientMatchers(const envoy::type::matcher::v3::ListStringMatcher for (const auto& key : keys) { envoy::type::matcher::v3::StringMatcher matcher; matcher.set_exact(key.get()); - matchers.push_back(std::make_unique(matcher)); + matchers.push_back( + std::make_unique>( + matcher)); } return std::make_shared(std::move(matchers)); diff --git a/source/extensions/filters/common/original_src/original_src_socket_option.cc b/source/extensions/filters/common/original_src/original_src_socket_option.cc index 0942ffd113d8d..c8a911d1ece0e 100644 --- a/source/extensions/filters/common/original_src/original_src_socket_option.cc +++ b/source/extensions/filters/common/original_src/original_src_socket_option.cc @@ -21,7 +21,7 @@ bool OriginalSrcSocketOption::setOption( Network::Socket& socket, envoy::config::core::v3::SocketOption::SocketState state) const { if (state == envoy::config::core::v3::SocketOption::STATE_PREBIND) { - 
socket.addressProvider().setLocalAddress(src_address_); + socket.connectionInfoProvider().setLocalAddress(src_address_); } return true; diff --git a/source/extensions/filters/common/ratelimit/ratelimit_impl.cc b/source/extensions/filters/common/ratelimit/ratelimit_impl.cc index 2c361c3180675..fbb8858dfd2fe 100644 --- a/source/extensions/filters/common/ratelimit/ratelimit_impl.cc +++ b/source/extensions/filters/common/ratelimit/ratelimit_impl.cc @@ -20,14 +20,10 @@ namespace Common { namespace RateLimit { GrpcClientImpl::GrpcClientImpl(const Grpc::RawAsyncClientSharedPtr& async_client, - const absl::optional& timeout, - envoy::config::core::v3::ApiVersion transport_api_version) + const absl::optional& timeout) : async_client_(async_client), timeout_(timeout), - service_method_( - Grpc::VersionedMethods("envoy.service.ratelimit.v3.RateLimitService.ShouldRateLimit", - "envoy.service.ratelimit.v2.RateLimitService.ShouldRateLimit") - .getMethodDescriptorForVersion(transport_api_version)), - transport_api_version_(transport_api_version) {} + service_method_(*Protobuf::DescriptorPool::generated_pool()->FindMethodByName( + "envoy.service.ratelimit.v3.RateLimitService.ShouldRateLimit")) {} GrpcClientImpl::~GrpcClientImpl() { ASSERT(!callbacks_); } @@ -71,8 +67,7 @@ void GrpcClientImpl::limit(RequestCallbacks& callbacks, const std::string& domai request_ = async_client_->send(service_method_, request, *this, parent_span, Http::AsyncClient::RequestOptions().setTimeout(timeout_).setParentContext( - Http::AsyncClient::ParentContext{&stream_info}), - transport_api_version_); + Http::AsyncClient::ParentContext{&stream_info})); } void GrpcClientImpl::onSuccess( @@ -124,14 +119,13 @@ void GrpcClientImpl::onFailure(Grpc::Status::GrpcStatus status, const std::strin ClientPtr rateLimitClient(Server::Configuration::FactoryContext& context, const envoy::config::core::v3::GrpcService& grpc_service, - const std::chrono::milliseconds timeout, - envoy::config::core::v3::ApiVersion 
transport_api_version) { + const std::chrono::milliseconds timeout) { // TODO(ramaraochavali): register client to singleton when GrpcClientImpl supports concurrent // requests. return std::make_unique( context.clusterManager().grpcAsyncClientManager().getOrCreateRawAsyncClient( grpc_service, context.scope(), true, Grpc::CacheOption::CacheWhenRuntimeEnabled), - timeout, transport_api_version); + timeout); } } // namespace RateLimit diff --git a/source/extensions/filters/common/ratelimit/ratelimit_impl.h b/source/extensions/filters/common/ratelimit/ratelimit_impl.h index f97d473c68372..fa48d9b61b340 100644 --- a/source/extensions/filters/common/ratelimit/ratelimit_impl.h +++ b/source/extensions/filters/common/ratelimit/ratelimit_impl.h @@ -45,8 +45,7 @@ class GrpcClientImpl : public Client, public Logger::Loggable { public: GrpcClientImpl(const Grpc::RawAsyncClientSharedPtr& async_client, - const absl::optional& timeout, - envoy::config::core::v3::ApiVersion transport_api_version); + const absl::optional& timeout); ~GrpcClientImpl() override; static void createRequest(envoy::service::ratelimit::v3::RateLimitRequest& request, @@ -74,7 +73,6 @@ class GrpcClientImpl : public Client, absl::optional timeout_; RequestCallbacks* callbacks_{}; const Protobuf::MethodDescriptor& service_method_; - const envoy::config::core::v3::ApiVersion transport_api_version_; }; /** @@ -82,8 +80,7 @@ class GrpcClientImpl : public Client, */ ClientPtr rateLimitClient(Server::Configuration::FactoryContext& context, const envoy::config::core::v3::GrpcService& grpc_service, - const std::chrono::milliseconds timeout, - envoy::config::core::v3::ApiVersion transport_api_version); + const std::chrono::milliseconds timeout); } // namespace RateLimit } // namespace Common diff --git a/source/extensions/filters/common/rbac/matchers.cc b/source/extensions/filters/common/rbac/matchers.cc index 25d8e2de63b54..f0efb5c7d668b 100644 --- a/source/extensions/filters/common/rbac/matchers.cc +++ 
b/source/extensions/filters/common/rbac/matchers.cc @@ -23,6 +23,8 @@ MatcherConstSharedPtr Matcher::create(const envoy::config::rbac::v3::Permission& IPMatcher::Type::DownstreamLocal); case envoy::config::rbac::v3::Permission::RuleCase::kDestinationPort: return std::make_shared(permission.destination_port()); + case envoy::config::rbac::v3::Permission::RuleCase::kDestinationPortRange: + return std::make_shared(permission.destination_port_range()); case envoy::config::rbac::v3::Permission::RuleCase::kAny: return std::make_shared(); case envoy::config::rbac::v3::Permission::RuleCase::kMetadata: @@ -135,7 +137,7 @@ bool IPMatcher::matches(const Network::Connection& connection, const Envoy::Http Envoy::Network::Address::InstanceConstSharedPtr ip; switch (type_) { case ConnectionRemote: - ip = connection.addressProvider().remoteAddress(); + ip = connection.connectionInfoProvider().remoteAddress(); break; case DownstreamLocal: ip = info.downstreamAddressProvider().localAddress(); @@ -159,6 +161,34 @@ bool PortMatcher::matches(const Network::Connection&, const Envoy::Http::Request return ip && ip->port() == port_; } +PortRangeMatcher::PortRangeMatcher(const ::envoy::type::v3::Int32Range& range) + : start_(range.start()), end_(range.end()) { + auto start = range.start(); + auto end = range.end(); + if (start < 0 || start > 65536) { + throw EnvoyException(fmt::format("range start {} is out of bounds", start)); + } + if (end < 0 || end > 65536) { + throw EnvoyException(fmt::format("range end {} is out of bounds", end)); + } + if (start >= end) { + throw EnvoyException( + fmt::format("range start {} cannot be greater or equal than range end {}", start, end)); + } +} + +bool PortRangeMatcher::matches(const Network::Connection&, const Envoy::Http::RequestHeaderMap&, + const StreamInfo::StreamInfo& info) const { + const Envoy::Network::Address::Ip* ip = + info.downstreamAddressProvider().localAddress().get()->ip(); + if (ip) { + const auto port = ip->port(); + return start_ <= 
port && port < end_; + } else { + return false; + } +} + bool AuthenticatedMatcher::matches(const Network::Connection& connection, const Envoy::Http::RequestHeaderMap&, const StreamInfo::StreamInfo&) const { diff --git a/source/extensions/filters/common/rbac/matchers.h b/source/extensions/filters/common/rbac/matchers.h index 472b4a2c9c17e..5623dee2b70a9 100644 --- a/source/extensions/filters/common/rbac/matchers.h +++ b/source/extensions/filters/common/rbac/matchers.h @@ -163,6 +163,18 @@ class PortMatcher : public Matcher { const uint32_t port_; }; +class PortRangeMatcher : public Matcher { +public: + PortRangeMatcher(const ::envoy::type::v3::Int32Range& range); + + bool matches(const Network::Connection&, const Envoy::Http::RequestHeaderMap&, + const StreamInfo::StreamInfo& info) const override; + +private: + const uint32_t start_; + const uint32_t end_; +}; + /** * Matches the principal name as described in the peer certificate. Uses the URI SAN first. If that * field is not present, uses the subject instead. @@ -171,14 +183,17 @@ class AuthenticatedMatcher : public Matcher { public: AuthenticatedMatcher(const envoy::config::rbac::v3::Principal::Authenticated& auth) : matcher_(auth.has_principal_name() - ? absl::make_optional(auth.principal_name()) + ? absl::make_optional< + Matchers::StringMatcherImpl>( + auth.principal_name()) : absl::nullopt) {} bool matches(const Network::Connection& connection, const Envoy::Http::RequestHeaderMap& headers, const StreamInfo::StreamInfo&) const override; private: - const absl::optional matcher_; + const absl::optional> + matcher_; }; /** @@ -222,10 +237,13 @@ class MetadataMatcher : public Matcher { * Perform a match against the request server from the client's connection * request. This is typically TLS SNI. 
*/ -class RequestedServerNameMatcher : public Matcher, Envoy::Matchers::StringMatcherImpl { +class RequestedServerNameMatcher + : public Matcher, + Envoy::Matchers::StringMatcherImpl { public: RequestedServerNameMatcher(const envoy::type::matcher::v3::StringMatcher& requested_server_name) - : Envoy::Matchers::StringMatcherImpl(requested_server_name) {} + : Envoy::Matchers::StringMatcherImpl( + requested_server_name) {} bool matches(const Network::Connection& connection, const Envoy::Http::RequestHeaderMap& headers, const StreamInfo::StreamInfo&) const override; diff --git a/source/extensions/filters/http/alternate_protocols_cache/BUILD b/source/extensions/filters/http/alternate_protocols_cache/BUILD index cb516768fadbf..e0a952af87d97 100644 --- a/source/extensions/filters/http/alternate_protocols_cache/BUILD +++ b/source/extensions/filters/http/alternate_protocols_cache/BUILD @@ -18,7 +18,7 @@ envoy_cc_library( "//source/common/http:alternate_protocols_cache", "//source/common/http:headers_lib", "//source/extensions/filters/http/common:pass_through_filter_lib", - "@com_googlesource_quiche//:spdy_core_alt_svc_wire_format_lib", + "@com_github_google_quiche//:spdy_core_alt_svc_wire_format_lib", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/http/alternate_protocols_cache/v3:pkg_cc_proto", diff --git a/source/extensions/filters/http/cache/BUILD b/source/extensions/filters/http/cache/BUILD index 1b04b15b7ca11..53219e59f3891 100644 --- a/source/extensions/filters/http/cache/BUILD +++ b/source/extensions/filters/http/cache/BUILD @@ -44,6 +44,17 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "cache_policy_lib", + hdrs = ["cache_policy.h"], + deps = [ + ":cache_headers_utils_lib", + ":http_cache_lib", + "//source/common/http:header_map_lib", + "//source/common/stream_info:filter_state_lib", + ], +) + envoy_proto_library( name = "key", srcs = ["key.proto"], @@ -84,6 +95,7 @@ 
envoy_cc_library( "//source/common/http:header_utility_lib", "//source/common/http:headers_lib", "//source/common/protobuf", + "@com_google_absl//absl/container:btree", "@envoy_api//envoy/extensions/filters/http/cache/v3alpha:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/http/cache/cache_filter.h b/source/extensions/filters/http/cache/cache_filter.h index 5560298f73561..418a770b9e2ca 100644 --- a/source/extensions/filters/http/cache/cache_filter.h +++ b/source/extensions/filters/http/cache/cache_filter.h @@ -89,7 +89,7 @@ class CacheFilter : public Http::PassThroughFilter, // of doing it per-request. A good example of such config is found in the gzip filter: // source/extensions/filters/http/gzip/gzip_filter.h. // Stores the allow list rules that decide if a header can be varied upon. - VaryHeader vary_allow_list_; + VaryAllowList vary_allow_list_; // True if the response has trailers. // TODO(toddmgreer): cache trailers. diff --git a/source/extensions/filters/http/cache/cache_headers_utils.cc b/source/extensions/filters/http/cache/cache_headers_utils.cc index 73a9c5b5625a8..c3972afa50f07 100644 --- a/source/extensions/filters/http/cache/cache_headers_utils.cc +++ b/source/extensions/filters/http/cache/cache_headers_utils.cc @@ -11,6 +11,7 @@ #include "source/extensions/filters/http/cache/cache_custom_headers.h" #include "absl/algorithm/container.h" +#include "absl/container/btree_set.h" #include "absl/strings/ascii.h" #include "absl/strings/numbers.h" #include "absl/strings/str_split.h" @@ -213,40 +214,48 @@ void CacheHeadersUtils::getAllMatchingHeaderNames( }); } -std::vector -CacheHeadersUtils::parseCommaDelimitedList(const Http::HeaderMap::GetResult& entry) { - if (entry.empty()) { - return {}; - } - - // TODO(mattklein123): Consider multiple header values? 
- std::vector header_values = absl::StrSplit(entry[0]->value().getStringView(), ','); - for (std::string& value : header_values) { - // TODO(cbdm): Might be able to improve the performance here by using StringUtil::trim to - // remove whitespace. - absl::StripAsciiWhitespace(&value); +std::vector +CacheHeadersUtils::parseCommaDelimitedHeader(const Http::HeaderMap::GetResult& entry) { + std::vector values; + for (size_t i = 0; i < entry.size(); ++i) { + for (absl::string_view s : absl::StrSplit(entry[i]->value().getStringView(), ',')) { + if (s.empty()) { + continue; + } + values.emplace_back(absl::StripAsciiWhitespace(s)); + } } - - return header_values; + return values; } -VaryHeader::VaryHeader( +VaryAllowList::VaryAllowList( const Protobuf::RepeatedPtrField& allow_list) { for (const auto& rule : allow_list) { - allow_list_.emplace_back(std::make_unique(rule)); + allow_list_.emplace_back( + std::make_unique>( + rule)); + } +} + +bool VaryAllowList::allowsValue(const absl::string_view vary_value) const { + for (const auto& rule : allow_list_) { + if (rule->match(vary_value)) { + return true; + } } + return false; } -bool VaryHeader::isAllowed(const Http::ResponseHeaderMap& headers) const { - if (!VaryHeader::hasVary(headers)) { +bool VaryAllowList::allowsHeaders(const Http::ResponseHeaderMap& headers) const { + if (!VaryHeaderUtils::hasVary(headers)) { return true; } - std::vector varied_headers = - CacheHeadersUtils::parseCommaDelimitedList(headers.get(Http::CustomHeaders::get().Vary)); + std::vector varied_headers = + CacheHeadersUtils::parseCommaDelimitedHeader(headers.get(Http::CustomHeaders::get().Vary)); - for (const std::string& header : varied_headers) { + for (absl::string_view& header : varied_headers) { bool valid = false; // "Vary: *" should never be cached per: @@ -255,11 +264,8 @@ bool VaryHeader::isAllowed(const Http::ResponseHeaderMap& headers) const { return false; } - for (const auto& rule : allow_list_) { - if (rule->match(header)) { - valid = 
true; - break; - } + if (allowsValue(header)) { + valid = true; } if (!valid) { @@ -270,69 +276,73 @@ bool VaryHeader::isAllowed(const Http::ResponseHeaderMap& headers) const { return true; } -bool VaryHeader::hasVary(const Http::ResponseHeaderMap& headers) { +bool VaryHeaderUtils::hasVary(const Http::ResponseHeaderMap& headers) { // TODO(mattklein123): Support multiple vary headers and/or just make the vary header inline. const auto vary_header = headers.get(Http::CustomHeaders::get().Vary); return !vary_header.empty() && !vary_header[0]->value().empty(); } +absl::btree_set +VaryHeaderUtils::getVaryValues(const Http::ResponseHeaderMap& headers) { + Http::HeaderMap::GetResult vary_headers = headers.get(Http::CustomHeaders::get().Vary); + if (vary_headers.empty()) { + return {}; + } + + std::vector values = + CacheHeadersUtils::parseCommaDelimitedHeader(vary_headers); + return absl::btree_set(values.begin(), values.end()); +} + namespace { // The separator characters are used to create the vary-key, and must be characters that are // invalid to be inside values and header names. The chosen characters are invalid per: // https://tools.ietf.org/html/rfc2616#section-4.2. // Used to separate the values of different headers. -constexpr absl::string_view header_separator = "\n"; +constexpr absl::string_view headerSeparator = "\n"; // Used to separate multiple values of a same header. 
-constexpr absl::string_view in_value_separator = "\r"; +constexpr absl::string_view inValueSeparator = "\r"; }; // namespace -std::string VaryHeader::createVaryKey(const Http::HeaderMap::GetResult& vary_header, - const Http::RequestHeaderMap& entry_headers) { - if (vary_header.empty()) { - return ""; +absl::optional +VaryHeaderUtils::createVaryIdentifier(const VaryAllowList& allow_list, + const absl::btree_set& vary_header_values, + const Http::RequestHeaderMap& request_headers) { + std::string vary_identifier = "vary-id\n"; + if (vary_header_values.empty()) { + return vary_identifier; } - // TODO(mattklein123): Support multiple vary headers and/or just make the vary header inline. - ASSERT(vary_header[0]->key() == "vary"); - - std::string vary_key = "vary-key\n"; - - for (const std::string& header : CacheHeadersUtils::parseCommaDelimitedList(vary_header)) { - // TODO(cbdm): Can add some bucketing logic here based on header. For example, we could - // normalize the values for accept-language by making all of {en-CA, en-GB, en-US} into - // "en". This way we would not need to store multiple versions of the same payload, and any - // of those values would find the payload in the requested language. Another example would be to - // bucket UserAgent values into android/ios/desktop; UserAgent::initializeFromHeaders tries to - // do that normalization and could be used as an inspiration for some bucketing configuration. - // The config should enable and control the bucketing wanted. + for (const absl::string_view& value : vary_header_values) { + if (value.empty()) { + // Empty headers are ignored. + continue; + } + if (!allow_list.allowsValue(value)) { + // The backend tried to vary on a header that we don't allow, so return + // absl::nullopt to indicate we are unable to cache this request. This + // also may occur if the allow list has changed since an item was cached, + // rendering the cached vary value invalid. 
+ return absl::nullopt; + } + // TODO(cbdm): Can add some bucketing logic here based on header. For + // example, we could normalize the values for accept-language by making all + // of {en-CA, en-GB, en-US} into "en". This way we would not need to store + // multiple versions of the same payload, and any of those values would find + // the payload in the requested language. Another example would be to bucket + // UserAgent values into android/ios/desktop; + // UserAgent::initializeFromHeaders tries to do that normalization and could + // be used as an inspiration for some bucketing configuration. The config + // should enable and control the bucketing wanted. const auto all_values = Http::HeaderUtility::getAllOfHeaderAsString( - entry_headers, Http::LowerCaseString(header), in_value_separator); - absl::StrAppend(&vary_key, header, in_value_separator, + request_headers, Http::LowerCaseString(std::string(value)), inValueSeparator); + absl::StrAppend(&vary_identifier, value, inValueSeparator, all_values.result().has_value() ? 
all_values.result().value() : "", - header_separator); - } - - return vary_key; -} - -Http::RequestHeaderMapPtr -VaryHeader::possibleVariedHeaders(const Http::RequestHeaderMap& request_headers) const { - Http::RequestHeaderMapPtr possible_headers = - Http::createHeaderMap({}); - - absl::flat_hash_set header_names; - CacheHeadersUtils::getAllMatchingHeaderNames(request_headers, allow_list_, header_names); - - for (const absl::string_view& header : header_names) { - const auto lower_case_header = Http::LowerCaseString(std::string{header}); - const auto value = request_headers.get(lower_case_header); - for (size_t i = 0; i < value.size(); i++) { - possible_headers->addCopy(lower_case_header, value[i]->value().getStringView()); - } + headerSeparator); } - return possible_headers; + return vary_identifier; } } // namespace Cache diff --git a/source/extensions/filters/http/cache/cache_headers_utils.h b/source/extensions/filters/http/cache/cache_headers_utils.h index 5c170452a9697..06737d7a2b2c5 100644 --- a/source/extensions/filters/http/cache/cache_headers_utils.h +++ b/source/extensions/filters/http/cache/cache_headers_utils.h @@ -10,6 +10,7 @@ #include "source/common/http/headers.h" #include "source/common/protobuf/protobuf.h" +#include "absl/container/btree_set.h" #include "absl/strings/str_join.h" #include "absl/strings/string_view.h" #include "absl/time/time.h" @@ -93,58 +94,67 @@ struct ResponseCacheControl { bool operator==(const RequestCacheControl& lhs, const RequestCacheControl& rhs); bool operator==(const ResponseCacheControl& lhs, const ResponseCacheControl& rhs); -class CacheHeadersUtils { +namespace CacheHeadersUtils { +// Parses header_entry as an HTTP time. Returns SystemTime() if +// header_entry is null or malformed. 
+SystemTime httpTime(const Http::HeaderEntry* header_entry); + +// Calculates the age of a cached response +Seconds calculateAge(const Http::ResponseHeaderMap& response_headers, SystemTime response_time, + SystemTime now); + +/** + * Read a leading positive decimal integer value and advance "*str" past the + * digits read. If overflow occurs, or no digits exist, return + * absl::nullopt without advancing "*str". + */ +absl::optional readAndRemoveLeadingDigits(absl::string_view& str); + +// Add to out all header names from the given map that match any of the given rules. +void getAllMatchingHeaderNames(const Http::HeaderMap& headers, + const std::vector& ruleset, + absl::flat_hash_set& out); + +// Parses the values of a comma-delimited list as defined per +// https://tools.ietf.org/html/rfc7230#section-7. +std::vector parseCommaDelimitedHeader(const Http::HeaderMap::GetResult& entry); +} // namespace CacheHeadersUtils + +class VaryAllowList { public: - // Parses header_entry as an HTTP time. Returns SystemTime() if - // header_entry is null or malformed. - static SystemTime httpTime(const Http::HeaderEntry* header_entry); - - // Calculates the age of a cached response - static Seconds calculateAge(const Http::ResponseHeaderMap& response_headers, - SystemTime response_time, SystemTime now); - - /** - * Read a leading positive decimal integer value and advance "*str" past the - * digits read. If overflow occurs, or no digits exist, return - * absl::nullopt without advancing "*str". - */ - static absl::optional readAndRemoveLeadingDigits(absl::string_view& str); - - // Add to out all header names from the given map that match any of the given rules. - static void getAllMatchingHeaderNames(const Http::HeaderMap& headers, - const std::vector& ruleset, - absl::flat_hash_set& out); - - // Parses the values of a comma-delimited list as defined per - // https://tools.ietf.org/html/rfc7230#section-7. 
- static std::vector parseCommaDelimitedList(const Http::HeaderMap::GetResult& entry); -}; - -class VaryHeader { -public: - // Checks if the headers contain a non-empty value in the Vary header. - static bool hasVary(const Http::ResponseHeaderMap& headers); - - // Creates a single string combining the values of the varied headers from entry_headers. - static std::string createVaryKey(const Http::HeaderMap::GetResult& vary_header, - const Http::RequestHeaderMap& entry_headers); - // Parses the allow list from the Cache Config into the object's private allow_list_. - VaryHeader(const Protobuf::RepeatedPtrField& allow_list); + VaryAllowList( + const Protobuf::RepeatedPtrField& allow_list); // Checks if the headers contain an allowed value in the Vary header. - bool isAllowed(const Http::ResponseHeaderMap& headers) const; + bool allowsHeaders(const Http::ResponseHeaderMap& headers) const; - // Returns a header map containing the subset of the original headers that can be varied from the - // request. - Http::RequestHeaderMapPtr - possibleVariedHeaders(const Http::RequestHeaderMap& request_headers) const; + // Checks if this vary header value is allowed to vary cache entries. + bool allowsValue(const absl::string_view header) const; private: // Stores the matching rules that define whether a header is allowed to be varied. std::vector allow_list_; }; +namespace VaryHeaderUtils { +// Checks if the headers contain a non-empty value in the Vary header. +bool hasVary(const Http::ResponseHeaderMap& headers); + +// Retrieve all the individual header values from the provided response header +// map across all vary header entries. +absl::btree_set getVaryValues(const Envoy::Http::ResponseHeaderMap& headers); + +// Creates a single string combining the values of the varied headers from +// entry_headers. Returns an absl::nullopt if no valid vary key can be created +// and the response should not be cached (eg. when disallowed vary headers are +// present in the response). 
+absl::optional +createVaryIdentifier(const VaryAllowList& allow_list, + const absl::btree_set& vary_header_values, + const Envoy::Http::RequestHeaderMap& request_headers); +} // namespace VaryHeaderUtils + } // namespace Cache } // namespace HttpFilters } // namespace Extensions diff --git a/source/extensions/filters/http/cache/cache_policy.h b/source/extensions/filters/http/cache/cache_policy.h new file mode 100644 index 0000000000000..a283f20fc10b6 --- /dev/null +++ b/source/extensions/filters/http/cache/cache_policy.h @@ -0,0 +1,107 @@ +#pragma once + +#include "envoy/http/header_map.h" +#include "envoy/stream_info/filter_state.h" + +#include "source/extensions/filters/http/cache/cache_headers_utils.h" +#include "source/extensions/filters/http/cache/http_cache.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Cache { + +/** + * Contains information about whether the cache entry is usable. + */ +struct CacheEntryUsability { + /** + * Whether the cache entry is usable, additional checks are required to be usable, or unusable. + */ + CacheEntryStatus status = CacheEntryStatus::Unusable; + /** + * Value to be put in the Age header for cache responses. + */ + Seconds age = Seconds::max(); +}; + +class CachePolicyCallbacks { +public: + virtual ~CachePolicyCallbacks() = default; + + virtual const StreamInfo::FilterStateSharedPtr& filterState() PURE; +}; + +/** + * An extension point for deployment specific caching behavior. + */ +class CachePolicy { +public: + virtual ~CachePolicy() = default; + + /** + * Calculates the lookup key for storing the entry in the cache. + * @param request_headers - headers from the request the CacheFilter is currently processing. + */ + virtual Key createCacheKey(const Http::RequestHeaderMap& request_headers) PURE; + + /** + * Determines the cacheability of the response during decoding. + * @param request_headers - headers from the request the CacheFilter is currently processing. 
+ * @param request_cache_control - the result of parsing the request's Cache-Control header, parsed + * by the caller. + * @return true if the response may be cached, based on the contents of the request. + */ + virtual bool requestCacheable(const Http::RequestHeaderMap& request_headers, + const RequestCacheControl& request_cache_control) PURE; + + /** + * Determines the cacheability of the response during encoding. + * @param request_headers - headers from the request the CacheFilter is currently processing. + * @param response_headers - headers from the upstream response the CacheFilter is currently + * processing. + * @param response_cache_control - the result of parsing the response's Cache-Control header, + * parsed by the caller. + * @param vary_allow_list - list of headers that the cache will respect when creating the Key for + * Vary-differentiated responses. + * @return true if the response may be cached. + */ + virtual bool responseCacheable(const Http::RequestHeaderMap& request_headers, + const Http::ResponseHeaderMap& response_headers, + const ResponseCacheControl& response_cache_control, + const VaryHeader& vary_allow_list) PURE; + + /** + * Determines whether the cached entry may be used directly or must be validated with upstream. + * @param request_headers - request headers associated with the response_headers. + * @param cached_response_headers - headers from the cached response. + * @param request_cache_control - the parsed result of the request's Cache-Control header, parsed + * by the caller. + * @param cached_response_cache_control - the parsed result of the response's Cache-Control + * header, parsed by the caller. + * @param content_length - the byte length of the cached content. + * @param cached_metadata - the metadata that has been stored alongside the cached entry. + * @param now - the timestamp for this request. + * @return details about whether or not the cached entry can be used. 
+ */ + virtual CacheEntryUsability + computeCacheEntryUsability(const Http::RequestHeaderMap& request_headers, + const Http::ResponseHeaderMap& cached_response_headers, + const RequestCacheControl& request_cache_control, + const ResponseCacheControl& cached_response_cache_control, + const uint64_t content_length, const ResponseMetadata& cached_metadata, + SystemTime now) PURE; + + /** + * Performs actions when StreamInfo and FilterState become available, for + * example for logging and observability, or to adapt CacheFilter behavior based on + * route-specific CacheFilter config. + * @param callbacks - Gives access to StreamInfo and FilterState + */ + virtual void setCallbacks(CachePolicyCallbacks& callbacks) PURE; +}; + +} // namespace Cache +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/cache/cacheability_utils.cc b/source/extensions/filters/http/cache/cacheability_utils.cc index 5a34dcb87c949..ac6c927d804db 100644 --- a/source/extensions/filters/http/cache/cacheability_utils.cc +++ b/source/extensions/filters/http/cache/cacheability_utils.cc @@ -57,7 +57,7 @@ bool CacheabilityUtils::canServeRequestFromCache(const Http::RequestHeaderMap& h } bool CacheabilityUtils::isCacheableResponse(const Http::ResponseHeaderMap& headers, - const VaryHeader& vary_allow_list) { + const VaryAllowList& vary_allow_list) { absl::string_view cache_control = headers.getInlineValue(CacheCustomHeaders::responseCacheControl()); ResponseCacheControl response_cache_control(cache_control); @@ -74,7 +74,7 @@ bool CacheabilityUtils::isCacheableResponse(const Http::ResponseHeaderMap& heade return !response_cache_control.no_store_ && cacheableStatusCodes().contains((headers.getStatusValue())) && has_validation_data && - vary_allow_list.isAllowed(headers); + vary_allow_list.allowsHeaders(headers); } } // namespace Cache diff --git a/source/extensions/filters/http/cache/cacheability_utils.h 
b/source/extensions/filters/http/cache/cacheability_utils.h index 00f5cfd8bfdf1..8418011f08c2a 100644 --- a/source/extensions/filters/http/cache/cacheability_utils.h +++ b/source/extensions/filters/http/cache/cacheability_utils.h @@ -8,24 +8,23 @@ namespace Envoy { namespace Extensions { namespace HttpFilters { namespace Cache { -class CacheabilityUtils { -public: - // Checks if a request can be served from cache. - // This does not depend on cache-control headers as - // request cache-control headers only decide whether - // validation is required and whether the response can be cached. - static bool canServeRequestFromCache(const Http::RequestHeaderMap& headers); +namespace CacheabilityUtils { +// Checks if a request can be served from cache. +// This does not depend on cache-control headers as +// request cache-control headers only decide whether +// validation is required and whether the response can be cached. +bool canServeRequestFromCache(const Http::RequestHeaderMap& headers); - // Checks if a response can be stored in cache. - // Note that if a request is not cacheable according to 'canServeRequestFromCache' - // then its response is also not cacheable. - // Therefore, canServeRequestFromCache, isCacheableResponse and - // CacheFilter::request_allows_inserts_ together should cover - // https://httpwg.org/specs/rfc7234.html#response.cacheability. Head requests are not - // cacheable. However, this function is never called for head requests. - static bool isCacheableResponse(const Http::ResponseHeaderMap& headers, - const VaryHeader& vary_allow_list); -}; +// Checks if a response can be stored in cache. +// Note that if a request is not cacheable according to 'canServeRequestFromCache' +// then its response is also not cacheable. +// Therefore, canServeRequestFromCache, isCacheableResponse and +// CacheFilter::request_allows_inserts_ together should cover +// https://httpwg.org/specs/rfc7234.html#response.cacheability. Head requests are not +// cacheable. 
However, this function is never called for head requests. +bool isCacheableResponse(const Http::ResponseHeaderMap& headers, + const VaryAllowList& vary_allow_list); +} // namespace CacheabilityUtils } // namespace Cache } // namespace HttpFilters } // namespace Extensions diff --git a/source/extensions/filters/http/cache/http_cache.cc b/source/extensions/filters/http/cache/http_cache.cc index 3a24900fb9822..a8c49950564d3 100644 --- a/source/extensions/filters/http/cache/http_cache.cc +++ b/source/extensions/filters/http/cache/http_cache.cc @@ -24,8 +24,9 @@ namespace HttpFilters { namespace Cache { LookupRequest::LookupRequest(const Http::RequestHeaderMap& request_headers, SystemTime timestamp, - const VaryHeader& vary_allow_list) - : timestamp_(timestamp) { + const VaryAllowList& vary_allow_list) + : request_headers_(Http::createHeaderMap(request_headers)), + vary_allow_list_(vary_allow_list), timestamp_(timestamp) { // These ASSERTs check prerequisites. A request without these headers can't be looked up in cache; // CacheFilter doesn't create LookupRequests for such requests. 
ASSERT(request_headers.Path(), "Can't form cache lookup key for malformed Http::RequestHeaderMap " @@ -50,8 +51,6 @@ LookupRequest::LookupRequest(const Http::RequestHeaderMap& request_headers, Syst key_.set_host(std::string(request_headers.getHostValue())); key_.set_path(std::string(request_headers.getPathValue())); key_.set_clear_http(scheme == scheme_values.Http); - - vary_headers_ = vary_allow_list.possibleVariedHeaders(request_headers); } // Unless this API is still alpha, calls to stableHashKey() must always return diff --git a/source/extensions/filters/http/cache/http_cache.h b/source/extensions/filters/http/cache/http_cache.h index ebd6c888ec97c..7438646649dd3 100644 --- a/source/extensions/filters/http/cache/http_cache.h +++ b/source/extensions/filters/http/cache/http_cache.h @@ -186,7 +186,7 @@ class LookupRequest { public: // Prereq: request_headers's Path(), Scheme(), and Host() are non-null. LookupRequest(const Http::RequestHeaderMap& request_headers, SystemTime timestamp, - const VaryHeader& vary_allow_list); + const VaryAllowList& vary_allow_list); const RequestCacheControl& requestCacheControl() const { return request_cache_control_; } @@ -206,8 +206,8 @@ class LookupRequest { LookupResult makeLookupResult(Http::ResponseHeaderMapPtr&& response_headers, ResponseMetadata&& metadata, uint64_t content_length) const; - // Warning: this should not be accessed out-of-thread! - const Http::RequestHeaderMap& getVaryHeaders() const { return *vary_headers_; } + const Http::RequestHeaderMap& requestHeaders() const { return *request_headers_; } + const VaryAllowList& varyAllowList() const { return vary_allow_list_; } private: void initializeRequestCacheControl(const Http::RequestHeaderMap& request_headers); @@ -216,15 +216,10 @@ class LookupRequest { Key key_; std::vector request_range_spec_; + Http::RequestHeaderMapPtr request_headers_; + const VaryAllowList& vary_allow_list_; // Time when this LookupRequest was created (in response to an HTTP request). 
SystemTime timestamp_; - // The subset of this request's headers that match one of the rules in - // envoy::extensions::filters::http::cache::v3alpha::CacheConfig::allowed_vary_headers. If a cache - // storage implementation forwards lookup requests to a remote cache server that supports *vary* - // headers, that server may need to see these headers. For local implementations, it may be - // simpler to instead call makeLookupResult with each potential response. - Http::RequestHeaderMapPtr vary_headers_; - RequestCacheControl request_cache_control_; }; diff --git a/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.cc b/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.cc index c39b136be8764..dc2947c457d93 100644 --- a/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.cc +++ b/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.cc @@ -48,8 +48,10 @@ class SimpleInsertContext : public InsertContext { public: SimpleInsertContext(LookupContext& lookup_context, SimpleHttpCache& cache) : key_(dynamic_cast(lookup_context).request().key()), - entry_vary_headers_( - dynamic_cast(lookup_context).request().getVaryHeaders()), + request_headers_( + dynamic_cast(lookup_context).request().requestHeaders()), + vary_allow_list_( + dynamic_cast(lookup_context).request().varyAllowList()), cache_(cache) {} void insertHeaders(const Http::ResponseHeaderMap& response_headers, @@ -84,18 +86,19 @@ class SimpleInsertContext : public InsertContext { private: void commit() { committed_ = true; - if (VaryHeader::hasVary(*response_headers_)) { + if (VaryHeaderUtils::hasVary(*response_headers_)) { cache_.varyInsert(key_, std::move(response_headers_), std::move(metadata_), body_.toString(), - entry_vary_headers_); + request_headers_, vary_allow_list_); } else { cache_.insert(key_, std::move(response_headers_), std::move(metadata_), body_.toString()); } } Key key_; + const Http::RequestHeaderMap& 
request_headers_; + const VaryAllowList& vary_allow_list_; Http::ResponseHeaderMapPtr response_headers_; ResponseMetadata metadata_; - const Http::RequestHeaderMap& entry_vary_headers_; SimpleHttpCache& cache_; Buffer::OwnedImpl body_; bool committed_ = false; @@ -106,11 +109,38 @@ LookupContextPtr SimpleHttpCache::makeLookupContext(LookupRequest&& request) { return std::make_unique(*this, std::move(request)); } -void SimpleHttpCache::updateHeaders(const LookupContext&, const Http::ResponseHeaderMap&, - const ResponseMetadata&) { - // TODO(toddmgreer): Support updating headers. - // Not implemented yet, however this is called during tests - // NOT_IMPLEMENTED_GCOVR_EXCL_LINE; +void SimpleHttpCache::updateHeaders(const LookupContext& lookup_context, + const Http::ResponseHeaderMap& response_headers, + const ResponseMetadata& metadata) { + const auto& simple_lookup_context = static_cast(lookup_context); + const Key& key = simple_lookup_context.request().key(); + absl::WriterMutexLock lock(&mutex_); + + auto iter = map_.find(key); + if (iter == map_.end() || !iter->second.response_headers_) { + return; + } + auto& entry = iter->second; + + // TODO(tangsaidi) handle Vary header updates properly + if (VaryHeaderUtils::hasVary(*(entry.response_headers_))) { + return; + } + + // https://www.rfc-editor.org/rfc/pdfrfc/rfc7234.txt.pdf + // 4.3.4 Freshening Stored Responses upon Validation + // use other header fields provided in the 304 (Not Modified) + // response to replace all instances of the corresponding header + // fields in the stored response. + // + // Assumptions: + // 1. The internet is fast, i.e. we get the result as soon as the server sends it. + // Race conditions would not be possible because we are always processing up-to-date data. + // 2. No key collision for etag. Therefore, if etag matches it's the same resource. + // 3. Backend is correct. 
etag is being used as a unique identifier to the resource + // TODO(tangsaidi) merge the header map instead of replacing it according to rfc7234 + entry.response_headers_ = Http::createHeaderMap(response_headers); + entry.metadata_ = metadata; } SimpleHttpCache::Entry SimpleHttpCache::lookup(const LookupRequest& request) { @@ -121,7 +151,7 @@ SimpleHttpCache::Entry SimpleHttpCache::lookup(const LookupRequest& request) { } ASSERT(iter->second.response_headers_); - if (VaryHeader::hasVary(*iter->second.response_headers_)) { + if (VaryHeaderUtils::hasVary(*iter->second.response_headers_)) { return varyLookup(request, iter->second.response_headers_); } else { return SimpleHttpCache::Entry{ @@ -143,12 +173,19 @@ SimpleHttpCache::varyLookup(const LookupRequest& request, // This method should be called from lookup, which holds the mutex for reading. mutex_.AssertReaderHeld(); - const auto vary_header = response_headers->get(Http::CustomHeaders::get().Vary); - ASSERT(!vary_header.empty()); + absl::btree_set vary_header_values = + VaryHeaderUtils::getVaryValues(*response_headers); + ASSERT(!vary_header_values.empty()); Key varied_request_key = request.key(); - const std::string vary_key = VaryHeader::createVaryKey(vary_header, request.getVaryHeaders()); - varied_request_key.add_custom_fields(vary_key); + const absl::optional vary_identifier = VaryHeaderUtils::createVaryIdentifier( + request.varyAllowList(), vary_header_values, request.requestHeaders()); + if (!vary_identifier.has_value()) { + // The vary allow list has changed and has made the vary header of this + // cached value not cacheable. 
+ return SimpleHttpCache::Entry{}; + } + varied_request_key.add_custom_fields(vary_identifier.value()); auto iter = map_.find(varied_request_key); if (iter == map_.end()) { @@ -164,31 +201,38 @@ SimpleHttpCache::varyLookup(const LookupRequest& request, void SimpleHttpCache::varyInsert(const Key& request_key, Http::ResponseHeaderMapPtr&& response_headers, ResponseMetadata&& metadata, std::string&& body, - const Http::RequestHeaderMap& request_vary_headers) { + const Http::RequestHeaderMap& request_headers, + const VaryAllowList& vary_allow_list) { absl::WriterMutexLock lock(&mutex_); - const auto vary_header = response_headers->get(Http::CustomHeaders::get().Vary); - ASSERT(!vary_header.empty()); + absl::btree_set vary_header_values = + VaryHeaderUtils::getVaryValues(*response_headers); + ASSERT(!vary_header_values.empty()); // Insert the varied response. Key varied_request_key = request_key; - const std::string vary_key = VaryHeader::createVaryKey(vary_header, request_vary_headers); - varied_request_key.add_custom_fields(vary_key); + const absl::optional vary_identifier = + VaryHeaderUtils::createVaryIdentifier(vary_allow_list, vary_header_values, request_headers); + if (!vary_identifier.has_value()) { + // Skip the insert if we are unable to create a vary key. + return; + } + + varied_request_key.add_custom_fields(vary_identifier.value()); map_[varied_request_key] = SimpleHttpCache::Entry{std::move(response_headers), std::move(metadata), std::move(body)}; // Add a special entry to flag that this request generates varied responses. auto iter = map_.find(request_key); if (iter == map_.end()) { - Http::ResponseHeaderMapPtr vary_only_map = - Http::createHeaderMap({}); - // TODO(mattklein123): Support multiple vary headers and/or just make the vary header inline. 
- vary_only_map->setCopy(Http::CustomHeaders::get().Vary, - vary_header[0]->value().getStringView()); + Envoy::Http::ResponseHeaderMapPtr vary_only_map = + Envoy::Http::createHeaderMap({}); + vary_only_map->setCopy(Envoy::Http::CustomHeaders::get().Vary, + absl::StrJoin(vary_header_values, ",")); // TODO(cbdm): In a cache that evicts entries, we could maintain a list of the "varykey"s that // we have inserted as the body for this first lookup. This way, we would know which keys we - // have inserted for that resource. For the first entry simply use vary_key as the entry_list, - // for future entries append vary_key to existing list. + // have inserted for that resource. For the first entry simply use vary_identifier as the + // entry_list; for future entries append vary_identifier to existing list. std::string entry_list; map_[request_key] = SimpleHttpCache::Entry{std::move(vary_only_map), {}, std::move(entry_list)}; } diff --git a/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.h b/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.h index 0cb45d7b58701..91d0e85dc5e78 100644 --- a/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.h +++ b/source/extensions/filters/http/cache/simple_http_cache/simple_http_cache.h @@ -44,7 +44,8 @@ class SimpleHttpCache : public HttpCache { // Inserts a response that has been varied on certain headers. 
void varyInsert(const Key& request_key, Http::ResponseHeaderMapPtr&& response_headers, ResponseMetadata&& metadata, std::string&& body, - const Http::RequestHeaderMap& request_vary_headers); + const Http::RequestHeaderMap& request_headers, + const VaryAllowList& vary_allow_list); absl::Mutex mutex_; absl::flat_hash_map map_ ABSL_GUARDED_BY(mutex_); diff --git a/source/extensions/filters/http/csrf/csrf_filter.h b/source/extensions/filters/http/csrf/csrf_filter.h index 5697cf99f59e4..b52b9473a884f 100644 --- a/source/extensions/filters/http/csrf/csrf_filter.h +++ b/source/extensions/filters/http/csrf/csrf_filter.h @@ -39,7 +39,8 @@ class CsrfPolicy : public Router::RouteSpecificFilterConfig { : policy_(policy), runtime_(runtime) { for (const auto& additional_origin : policy.additional_origins()) { additional_origins_.emplace_back( - std::make_unique(additional_origin)); + std::make_unique>( + additional_origin)); } } diff --git a/source/extensions/filters/http/decompressor/decompressor_filter.h b/source/extensions/filters/http/decompressor/decompressor_filter.h index 5f13e6b3b13da..d1baf6f44035c 100644 --- a/source/extensions/filters/http/decompressor/decompressor_filter.h +++ b/source/extensions/filters/http/decompressor/decompressor_filter.h @@ -174,11 +174,11 @@ class DecompressorFilter : public Http::PassThroughFilter, headers.removeContentLength(); modifyContentEncoding(headers); - ENVOY_STREAM_LOG(debug, "do decompress {}: {}", callbacks, direction_config.logString(), + ENVOY_STREAM_LOG(trace, "do decompress {}: {}", callbacks, direction_config.logString(), headers); } else { direction_config.stats().not_decompressed_.inc(); - ENVOY_STREAM_LOG(debug, "do not decompress {}: {}", callbacks, direction_config.logString(), + ENVOY_STREAM_LOG(trace, "do not decompress {}: {}", callbacks, direction_config.logString(), headers); } diff --git a/source/extensions/filters/http/dynamic_forward_proxy/config.cc b/source/extensions/filters/http/dynamic_forward_proxy/config.cc 
index eb7dd21633a97..3f58fa19ca06a 100644 --- a/source/extensions/filters/http/dynamic_forward_proxy/config.cc +++ b/source/extensions/filters/http/dynamic_forward_proxy/config.cc @@ -15,8 +15,8 @@ Http::FilterFactoryCb DynamicForwardProxyFilterFactory::createFilterFactoryFromP const envoy::extensions::filters::http::dynamic_forward_proxy::v3::FilterConfig& proto_config, const std::string&, Server::Configuration::FactoryContext& context) { Extensions::Common::DynamicForwardProxy::DnsCacheManagerFactoryImpl cache_manager_factory( - context.singletonManager(), context.dispatcher(), context.threadLocal(), - context.api().randomGenerator(), context.runtime(), context.scope()); + context.singletonManager(), context.dispatcher(), context.threadLocal(), context.api(), + context.runtime(), context.scope(), context.messageValidationVisitor()); ProxyFilterConfigSharedPtr filter_config(std::make_shared( proto_config, cache_manager_factory, context.clusterManager())); return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void { diff --git a/source/extensions/filters/http/ext_authz/config.cc b/source/extensions/filters/http/ext_authz/config.cc index bcff25d3704e5..04413850987d2 100644 --- a/source/extensions/filters/http/ext_authz/config.cc +++ b/source/extensions/filters/http/ext_authz/config.cc @@ -25,6 +25,8 @@ Http::FilterFactoryCb ExtAuthzFilterConfig::createFilterFactoryFromProtoTyped( const auto filter_config = std::make_shared( proto_config, context.scope(), context.runtime(), context.httpContext(), stats_prefix, context.getServerFactoryContext().bootstrap()); + // The callback is created in main thread and executed in worker thread, variables except factory + // context must be captured by value into the callback. 
Http::FilterFactoryCb callback; if (proto_config.has_http_service()) { @@ -42,32 +44,30 @@ Http::FilterFactoryCb ExtAuthzFilterConfig::createFilterFactoryFromProtoTyped( }; } else if (proto_config.grpc_service().has_google_grpc()) { // Google gRPC client. - const uint32_t timeout_ms = PROTOBUF_GET_MS_OR_DEFAULT(proto_config.grpc_service(), timeout, DefaultTimeout); - callback = [&context, filter_config, timeout_ms, proto_config, - transport_api_version = Config::Utility::getAndCheckTransportVersion(proto_config)]( - Http::FilterChainFactoryCallbacks& callbacks) { + Config::Utility::checkTransportVersion(proto_config); + callback = [&context, filter_config, timeout_ms, + proto_config](Http::FilterChainFactoryCallbacks& callbacks) { auto client = std::make_unique( context.clusterManager().grpcAsyncClientManager().getOrCreateRawAsyncClient( proto_config.grpc_service(), context.scope(), true, Grpc::CacheOption::AlwaysCache), - std::chrono::milliseconds(timeout_ms), transport_api_version); + std::chrono::milliseconds(timeout_ms)); callbacks.addStreamFilter(std::make_shared(filter_config, std::move(client))); }; } else { // Envoy gRPC client. 
- - Grpc::RawAsyncClientSharedPtr raw_client = - context.clusterManager().grpcAsyncClientManager().getOrCreateRawAsyncClient( - proto_config.grpc_service(), context.scope(), true, Grpc::CacheOption::AlwaysCache); const uint32_t timeout_ms = PROTOBUF_GET_MS_OR_DEFAULT(proto_config.grpc_service(), timeout, DefaultTimeout); - callback = [raw_client, filter_config, timeout_ms, - transport_api_version = Config::Utility::getAndCheckTransportVersion(proto_config)]( - Http::FilterChainFactoryCallbacks& callbacks) { + Config::Utility::checkTransportVersion(proto_config); + callback = [grpc_service = proto_config.grpc_service(), &context, filter_config, + timeout_ms](Http::FilterChainFactoryCallbacks& callbacks) { + Grpc::RawAsyncClientSharedPtr raw_client = + context.clusterManager().grpcAsyncClientManager().getOrCreateRawAsyncClient( + grpc_service, context.scope(), true, Grpc::CacheOption::AlwaysCache); auto client = std::make_unique( - raw_client, std::chrono::milliseconds(timeout_ms), transport_api_version); + raw_client, std::chrono::milliseconds(timeout_ms)); callbacks.addStreamFilter(std::make_shared(filter_config, std::move(client))); }; } diff --git a/source/extensions/filters/http/ext_authz/ext_authz.cc b/source/extensions/filters/http/ext_authz/ext_authz.cc index 3af51ba77efd0..29a6ca8be8437 100644 --- a/source/extensions/filters/http/ext_authz/ext_authz.cc +++ b/source/extensions/filters/http/ext_authz/ext_authz.cc @@ -374,7 +374,10 @@ void Filter::continueDecoding() { } Filter::PerRouteFlags Filter::getPerRouteFlags(const Router::RouteConstSharedPtr& route) const { - if (route == nullptr || route->routeEntry() == nullptr) { + if (route == nullptr || + (!Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.http_ext_authz_do_not_skip_direct_response_and_redirect") && + route->routeEntry() == nullptr)) { return PerRouteFlags{true /*skip_check_*/, false /*skip_request_body_buffering_*/}; } diff --git a/source/extensions/filters/http/ext_proc/ext_proc.cc 
b/source/extensions/filters/http/ext_proc/ext_proc.cc index 711e377161cc8..00d22c92a665d 100644 --- a/source/extensions/filters/http/ext_proc/ext_proc.cc +++ b/source/extensions/filters/http/ext_proc/ext_proc.cc @@ -24,6 +24,7 @@ using Http::ResponseHeaderMap; using Http::ResponseTrailerMap; static const std::string kErrorPrefix = "ext_proc error"; +static const int DefaultImmediateStatus = 200; void Filter::setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) { Http::PassThroughFilter::setDecoderFilterCallbacks(callbacks); @@ -50,6 +51,7 @@ Filter::StreamOpenState Filter::openStream() { } void Filter::onDestroy() { + ENVOY_LOG(trace, "onDestroy"); // Make doubly-sure we no longer use the stream, as // per the filter contract. processing_complete_ = true; @@ -221,7 +223,7 @@ FilterDataStatus Filter::onData(ProcessorState& state, Buffer::Instance& data, b // At this point we will continue, but with no data, because that will come later if (end_stream) { - // But we need to buffer the last chunk because it's our last chance to do stuff + // But we need to stop iteration for the last chunk because it's our last chance to do stuff state.setPaused(true); result = FilterDataStatus::StopIterationNoBuffer; } else { @@ -230,8 +232,55 @@ FilterDataStatus Filter::onData(ProcessorState& state, Buffer::Instance& data, b break; } case ProcessingMode::BUFFERED_PARTIAL: - ENVOY_LOG(debug, "Ignoring unimplemented request body processing mode"); - result = FilterDataStatus::Continue; + // BUFFERED_PARTIAL mode works as follows: + // + // 1) As data chunks arrive, we move the data into a new buffer, which we store + // in the buffer queue, and continue the filter stream with an empty buffer. This + // is the same thing that we do in STREAMING mode. + // 2) If end of stream is reached before the queue reaches the buffer limit, we + // send the buffered data to the server and essentially behave as if we are in + // buffered mode. 
+ // 3) If instead the buffer limit is reached before end of stream, then we also + // send the buffered data to the server, and raise the watermark to prevent Envoy + // from running out of memory while we wait. + // 4) It is possible that Envoy will keep sending us data even in that case, so + // we must continue to queue data and prepare to re-inject it later. + if (state.partialBodyProcessed()) { + // We already sent and received the buffer, so everything else just falls through. + ENVOY_LOG(trace, "Partial buffer limit reached"); + result = FilterDataStatus::Continue; + } else if (state.callbackState() == + ProcessorState::CallbackState::BufferedPartialBodyCallback) { + // More data came in while we were waiting for a callback result. We need + // to queue it and deliver it later in case the callback changes the data. + state.enqueueStreamingChunk(data, false, false); + ENVOY_LOG(trace, "Call in progress for partial mode"); + state.setPaused(true); + result = FilterDataStatus::StopIterationNoBuffer; + } else if (end_stream || state.queueOverHighLimit()) { + switch (openStream()) { + case StreamOpenState::Error: + return FilterDataStatus::StopIterationNoBuffer; + case StreamOpenState::IgnoreError: + return FilterDataStatus::Continue; + case StreamOpenState::Ok: + // Fall through + break; + } + state.enqueueStreamingChunk(data, false, false); + // Put all buffered data so far into one big buffer + const auto& all_data = state.consolidateStreamedChunks(true); + ENVOY_LOG(debug, "Sending {} bytes of data in buffered partial mode. 
end_stream = {}", + all_data.data.length(), end_stream); + sendBodyChunk(state, all_data.data, + ProcessorState::CallbackState::BufferedPartialBodyCallback, end_stream); + result = FilterDataStatus::StopIterationNoBuffer; + state.setPaused(true); + } else { + // Keep on running and buffering + state.enqueueStreamingChunk(data, false, false); + result = FilterDataStatus::Continue; + } break; case ProcessingMode::NONE: default: @@ -414,6 +463,7 @@ void Filter::onReceiveMessage(std::unique_ptr&& r) { // We won't be sending anything more to the stream after we // receive this message. processing_complete_ = true; + cleanUpTimers(); sendImmediateResponse(response->immediate_response()); message_handled = true; break; @@ -511,7 +561,11 @@ void Filter::cleanUpTimers() { } void Filter::sendImmediateResponse(const ImmediateResponse& response) { - const auto status_code = response.has_status() ? response.status().code() : 200; + auto status_code = response.has_status() ? response.status().code() : DefaultImmediateStatus; + if (!MutationUtils::isValidHttpStatus(status_code)) { + ENVOY_LOG(debug, "Ignoring attempt to set invalid HTTP status {}", status_code); + status_code = DefaultImmediateStatus; + } const auto grpc_status = response.has_grpc_status() ? 
absl::optional(response.grpc_status().status()) @@ -523,6 +577,7 @@ void Filter::sendImmediateResponse(const ImmediateResponse& response) { }; sent_immediate_response_ = true; + ENVOY_LOG(debug, "Sending local reply with status code {}", status_code); encoder_callbacks_->sendLocalReply(static_cast(status_code), response.body(), mutate_headers, grpc_status, response.details()); } diff --git a/source/extensions/filters/http/ext_proc/mutation_utils.cc b/source/extensions/filters/http/ext_proc/mutation_utils.cc index 8fa43ade130de..1079716b8a7c0 100644 --- a/source/extensions/filters/http/ext_proc/mutation_utils.cc +++ b/source/extensions/filters/http/ext_proc/mutation_utils.cc @@ -14,6 +14,7 @@ namespace ExternalProcessing { using Http::Headers; using Http::LowerCaseString; +using envoy::config::core::v3::HeaderValueOption; using envoy::service::ext_proc::v3alpha::BodyMutation; using envoy::service::ext_proc::v3alpha::BodyResponse; using envoy::service::ext_proc::v3alpha::CommonResponse; @@ -56,19 +57,26 @@ void MutationUtils::applyHeaderMutations(const HeaderMutation& mutation, Http::H if (!sh.has_header()) { continue; } - if (isSettableHeader(sh.header().key(), replacing_message)) { + if (!isSettableHeader(sh, replacing_message)) { + // Log the failure to set the header here, but don't log the value in case it's + // something sensitive like the Authorization header. + ENVOY_LOG(debug, "Ignoring improper attempt to set header {}", sh.header().key()); + } else { // Make "false" the default. This is logical and matches the ext_authz // filter. However, the router handles this same protobuf and uses "true" // as the default instead. 
const bool append = PROTOBUF_GET_WRAPPED_OR_DEFAULT(sh, append, false); - ENVOY_LOG(trace, "Setting header {} append = {}", sh.header().key(), append); - if (append) { - headers.addCopy(LowerCaseString(sh.header().key()), sh.header().value()); + const LowerCaseString lcKey(sh.header().key()); + if (append && !headers.get(lcKey).empty() && !isAppendableHeader(lcKey)) { + ENVOY_LOG(debug, "Ignoring duplicate value for header {}", sh.header().key()); } else { - headers.setCopy(LowerCaseString(sh.header().key()), sh.header().value()); + ENVOY_LOG(trace, "Setting header {} append = {}", sh.header().key(), append); + if (append) { + headers.addCopy(lcKey, sh.header().value()); + } else { + headers.setCopy(lcKey, sh.header().value()); + } } - } else { - ENVOY_LOG(debug, "Header {} is not settable", sh.header().key()); } } } @@ -112,16 +120,40 @@ void MutationUtils::applyBodyMutations(const BodyMutation& mutation, Buffer::Ins } } +bool MutationUtils::isValidHttpStatus(int code) { return (code >= 200); } + // Ignore attempts to set certain sensitive headers that can break later processing. // We may re-enable some of these after further testing. This logic is specific // to the ext_proc filter so it is not shared with HeaderUtils. 
-bool MutationUtils::isSettableHeader(absl::string_view key, bool replacing_message) { +bool MutationUtils::isSettableHeader(const HeaderValueOption& header, bool replacing_message) { + const auto& key = header.header().key(); const auto& headers = Headers::get(); - return !absl::EqualsIgnoreCase(key, headers.HostLegacy.get()) && - !absl::EqualsIgnoreCase(key, headers.Host.get()) && - (!absl::EqualsIgnoreCase(key, headers.Method.get()) || replacing_message) && - !absl::EqualsIgnoreCase(key, headers.Scheme.get()) && - !absl::StartsWithIgnoreCase(key, headers.prefix()); + if (absl::EqualsIgnoreCase(key, headers.HostLegacy.get()) || + absl::EqualsIgnoreCase(key, headers.Host.get()) || + (absl::EqualsIgnoreCase(key, headers.Method.get()) && !replacing_message) || + absl::EqualsIgnoreCase(key, headers.Scheme.get()) || + absl::StartsWithIgnoreCase(key, headers.prefix())) { + return false; + } + if (absl::EqualsIgnoreCase(key, headers.Status.get())) { + const auto& value = header.header().value(); + uint32_t status; + if (!absl::SimpleAtoi(value, &status)) { + ENVOY_LOG(debug, "Invalid value {} for HTTP status code", value); + return false; + } + if (!isValidHttpStatus(status)) { + ENVOY_LOG(debug, "Invalid HTTP status code {}", status); + return false; + } + } + return true; +} + +// Ignore attempts to append a second value to any system header, as in general those +// were never designed to support multiple values. 
+bool MutationUtils::isAppendableHeader(absl::string_view key) { + return !key.empty() && key[0] != ':'; } } // namespace ExternalProcessing diff --git a/source/extensions/filters/http/ext_proc/mutation_utils.h b/source/extensions/filters/http/ext_proc/mutation_utils.h index 14b28cf306e7d..f57c13793d8b5 100644 --- a/source/extensions/filters/http/ext_proc/mutation_utils.h +++ b/source/extensions/filters/http/ext_proc/mutation_utils.h @@ -37,8 +37,13 @@ class MutationUtils : public Logger::Loggable { static void applyBodyMutations(const envoy::service::ext_proc::v3alpha::BodyMutation& mutation, Buffer::Instance& buffer); + // Determine if a particular HTTP status code is valid. + static bool isValidHttpStatus(int code); + private: - static bool isSettableHeader(absl::string_view key, bool replacing_message); + static bool isSettableHeader(const envoy::config::core::v3::HeaderValueOption& header, + bool replacing_message); + static bool isAppendableHeader(absl::string_view key); }; } // namespace ExternalProcessing diff --git a/source/extensions/filters/http/ext_proc/processor_state.cc b/source/extensions/filters/http/ext_proc/processor_state.cc index 303798657dd35..8cc1bb21de791 100644 --- a/source/extensions/filters/http/ext_proc/processor_state.cc +++ b/source/extensions/filters/http/ext_proc/processor_state.cc @@ -27,7 +27,8 @@ void ProcessorState::startMessageTimer(Event::TimerCb cb, std::chrono::milliseco bool ProcessorState::handleHeadersResponse(const HeadersResponse& response) { if (callback_state_ == CallbackState::HeadersCallback) { - ENVOY_LOG(debug, "applying headers response"); + ENVOY_LOG(debug, "applying headers response. 
body mode = {}", + ProcessingMode::BodySendMode_Name(body_mode_)); const auto& common_response = response.response(); MutationUtils::applyCommonHeaderResponse(response, *headers_); if (response.response().clear_route_cache()) { @@ -54,40 +55,32 @@ bool ProcessorState::handleHeadersResponse(const HeadersResponse& response) { }); } } - // Once this message is received, we won't send anything more on this request // or response to the processor. Clear flags to make sure. body_mode_ = ProcessingMode::NONE; send_trailers_ = false; clearWatermark(); - } else { if (no_body_) { // Fall through if there was never a body in the first place. + ENVOY_LOG(debug, "The message had no body"); + } else if (complete_body_available_ && body_mode_ != ProcessingMode::NONE) { + // If we get here, then all the body data came in before the header message + // was complete, and the server wants the body. It doesn't matter whether the + // processing mode is buffered, streamed, or partially streamed -- if we get + // here then the whole body is in the buffer and we can proceed as if the + // "buffered" processing mode was set. + ENVOY_LOG(debug, "Sending buffered request body message"); + filter_.sendBufferedData(*this, ProcessorState::CallbackState::BufferedBodyCallback, true); + clearWatermark(); + return true; } else if (body_mode_ == ProcessingMode::BUFFERED) { - if (complete_body_available_) { - // If we get here, then all the body data came in before the header message - // was complete, and the server wants the body. So, don't continue filter - // processing, but send the buffered request body now. 
- ENVOY_LOG(debug, "Sending buffered request body message"); - filter_.sendBufferedData(*this, ProcessorState::CallbackState::BufferedBodyCallback, - true); - } - // Otherwise, we're not ready to continue processing because then + // Here, we're not ready to continue processing because then // we won't be able to modify the headers any more, so do nothing and // let the doData callback handle body chunks until the end is reached. clearWatermark(); return true; } else if (body_mode_ == ProcessingMode::STREAMED) { - if (complete_body_available_) { - // All data came in before headers callback, so act just as if we were buffering - // since effectively this is the same thing. - ENVOY_LOG(debug, "Sending buffered body data for whole message"); - filter_.sendBufferedData(*this, ProcessorState::CallbackState::BufferedBodyCallback, - true); - clearWatermark(); - return true; - } if (hasBufferedData()) { // We now know that we need to process what we have buffered in streaming mode. // Move the current buffer into the queue for remote processing and clear the @@ -95,7 +88,7 @@ bool ProcessorState::handleHeadersResponse(const HeadersResponse& response) { Buffer::OwnedImpl buffered_chunk; modifyBufferedData( [&buffered_chunk](Buffer::Instance& data) { buffered_chunk.move(data); }); - ENVOY_LOG(debug, "Sending first chunk using buffered data"); + ENVOY_LOG(debug, "Sending first chunk using buffered data ({})", buffered_chunk.length()); filter_.sendBodyChunk(*this, buffered_chunk, ProcessorState::CallbackState::StreamedBodyCallback, false); enqueueStreamingChunk(buffered_chunk, false, true); @@ -105,6 +98,29 @@ bool ProcessorState::handleHeadersResponse(const HeadersResponse& response) { } continueIfNecessary(); return true; + } else if (body_mode_ == ProcessingMode::BUFFERED_PARTIAL) { + if (hasBufferedData()) { + // Put the data buffered so far into the buffer queue. When more data comes in + // we'll check to see if we have reached the watermark. 
+ ENVOY_LOG(debug, "Enqueuing body data buffered so far"); + Buffer::OwnedImpl buffered_chunk; + modifyBufferedData( + [&buffered_chunk](Buffer::Instance& data) { buffered_chunk.move(data); }); + enqueueStreamingChunk(buffered_chunk, false, true); + } + if (queueOverHighLimit()) { + // We reached the limit so send what we have. This is different from the buffered + // case because we need to be set up to handle data that might come in while + // waiting for the callback, so the chunk needs to stay on the queue. + const auto& all_data = consolidateStreamedChunks(true); + ENVOY_LOG(debug, "Sending {} bytes of data in buffered partial mode before end stream"); + filter_.sendBodyChunk(*this, all_data.data, + ProcessorState::CallbackState::BufferedPartialBodyCallback, false); + } else { + clearWatermark(); + continueIfNecessary(); + } + return true; } if (send_trailers_ && trailers_available_) { // Trailers came in while we were waiting for this response, and the server @@ -129,7 +145,8 @@ bool ProcessorState::handleBodyResponse(const BodyResponse& response) { bool should_continue = false; if (callback_state_ == CallbackState::BufferedBodyCallback || callback_state_ == CallbackState::StreamedBodyCallback || - callback_state_ == CallbackState::StreamedBodyCallbackFinishing) { + callback_state_ == CallbackState::StreamedBodyCallbackFinishing || + callback_state_ == CallbackState::BufferedPartialBodyCallback) { ENVOY_LOG(debug, "Processing body response"); if (callback_state_ == CallbackState::BufferedBodyCallback) { ENVOY_LOG(debug, "Applying body response to buffered data. 
State = {}", callback_state_); @@ -139,7 +156,6 @@ bool ProcessorState::handleBodyResponse(const BodyResponse& response) { clearWatermark(); callback_state_ = CallbackState::Idle; should_continue = true; - } else if (callback_state_ == CallbackState::StreamedBodyCallback || callback_state_ == CallbackState::StreamedBodyCallbackFinishing) { bool delivered_one = false; @@ -164,9 +180,34 @@ bool ProcessorState::handleBodyResponse(const BodyResponse& response) { if (queueBelowLowLimit()) { clearWatermark(); } - if (chunks_for_processing_.empty()) { + if (chunk_queue_.empty()) { callback_state_ = CallbackState::Idle; } + } else if (callback_state_ == CallbackState::BufferedPartialBodyCallback) { + // Apply changes to the buffer that we sent to the server + auto queued_chunk = dequeueStreamingChunk(false); + ENVOY_BUG(queued_chunk, "Bad partial body callback state"); + auto chunk = std::move(*queued_chunk); + MutationUtils::applyCommonBodyResponse(response, nullptr, chunk->data); + if (chunk->data.length() > 0) { + ENVOY_LOG(trace, "Injecting {} bytes of processed data to filter stream", + chunk->data.length()); + injectDataToFilterChain(chunk->data, false); + } + should_continue = true; + clearWatermark(); + callback_state_ = CallbackState::Idle; + partial_body_processed_ = true; + + // If anything else is left on the queue, inject it too + while (auto leftover_chunk = dequeueStreamingChunk(false)) { + auto chunk = std::move(*leftover_chunk); + if (chunk->data.length() > 0) { + ENVOY_LOG(trace, "Injecting {} bytes of leftover data to filter stream", + chunk->data.length()); + injectDataToFilterChain(chunk->data, false); + } + } } if (response.response().clear_route_cache()) { @@ -208,30 +249,12 @@ bool ProcessorState::handleTrailersResponse(const TrailersResponse& response) { void ProcessorState::enqueueStreamingChunk(Buffer::Instance& data, bool end_stream, bool delivered) { - bytes_enqueued_ += data.length(); - auto next_chunk = std::make_unique(); - 
next_chunk->data.move(data); - next_chunk->end_stream = end_stream; - next_chunk->delivered = delivered; - chunks_for_processing_.push_back(std::move(next_chunk)); + chunk_queue_.push(data, end_stream, delivered); if (queueOverHighLimit()) { requestWatermark(); } } -absl::optional ProcessorState::dequeueStreamingChunk(bool undelivered_only) { - if (chunks_for_processing_.empty()) { - return absl::nullopt; - } - if (undelivered_only && chunks_for_processing_.front()->delivered) { - return absl::nullopt; - } - QueuedChunkPtr chunk = std::move(chunks_for_processing_.front()); - chunks_for_processing_.pop_front(); - bytes_enqueued_ -= chunk->data.length(); - return chunk; -} - void ProcessorState::clearAsyncState() { cleanUpTimer(); while (auto queued_chunk = dequeueStreamingChunk(false)) { @@ -316,6 +339,46 @@ void EncodingProcessorState::clearWatermark() { } } +void ChunkQueue::push(Buffer::Instance& data, bool end_stream, bool delivered) { + bytes_enqueued_ += data.length(); + auto next_chunk = std::make_unique(); + next_chunk->data.move(data); + next_chunk->end_stream = end_stream; + next_chunk->delivered = delivered; + queue_.push_back(std::move(next_chunk)); +} + +absl::optional ChunkQueue::pop(bool undelivered_only) { + if (queue_.empty()) { + return absl::nullopt; + } + if (undelivered_only && queue_.front()->delivered) { + return absl::nullopt; + } + QueuedChunkPtr chunk = std::move(queue_.front()); + queue_.pop_front(); + bytes_enqueued_ -= chunk->data.length(); + return chunk; +} + +const QueuedChunk& ChunkQueue::consolidate(bool delivered) { + if (queue_.size() == 1) { + queue_.front()->delivered = delivered; + return *(queue_.front()); + } + + auto new_chunk = std::make_unique(); + new_chunk->end_stream = false; + new_chunk->delivered = delivered; + + for (auto it = queue_.begin(); it != queue_.end(); it = queue_.erase(it)) { + new_chunk->data.move((*it)->data); + } + ENVOY_BUG(queue_.empty(), "Did not empty all chunks"); + 
queue_.push_front(std::move(new_chunk)); + return *(queue_.front()); +} + } // namespace ExternalProcessing } // namespace HttpFilters } // namespace Extensions diff --git a/source/extensions/filters/http/ext_proc/processor_state.h b/source/extensions/filters/http/ext_proc/processor_state.h index 9ca9cbee66a7f..d3f74746e0bd4 100644 --- a/source/extensions/filters/http/ext_proc/processor_state.h +++ b/source/extensions/filters/http/ext_proc/processor_state.h @@ -30,6 +30,27 @@ class QueuedChunk { }; using QueuedChunkPtr = std::unique_ptr; +class ChunkQueue { +public: + ChunkQueue() = default; + ChunkQueue(const ChunkQueue&) = delete; + ChunkQueue& operator=(const ChunkQueue&) = delete; + uint32_t bytesEnqueued() const { return bytes_enqueued_; } + bool empty() const { return queue_.empty(); } + void push(Buffer::Instance& data, bool end_stream, bool delivered); + absl::optional pop(bool undelivered_only); + const QueuedChunk& consolidate(bool delivered); + +private: + // If we are in either streaming mode, store chunks that we received here, + // and use the "delivered" flag to keep track of which ones were pushed + // to the external processor. When matching responses come back for these + // chunks, then they will be removed. + std::deque queue_; + // The total size of chunks in the queue. + uint32_t bytes_enqueued_{}; +}; + class ProcessorState : public Logger::Loggable { public: // This describes whether the filter is waiting for a response to a gRPC message. @@ -49,6 +70,8 @@ class ProcessorState : public Logger::Loggable { // in which the processing mode was changed while there were outstanding // messages sent to the processor. StreamedBodyCallbackFinishing, + // Waiting for a body callback in "buffered partial" mode. + BufferedPartialBodyCallback, // Waiting for a "trailers" response. 
TrailersCallback, }; @@ -56,7 +79,7 @@ class ProcessorState : public Logger::Loggable { explicit ProcessorState(Filter& filter) : filter_(filter), watermark_requested_(false), paused_(false), no_body_(false), complete_body_available_(false), trailers_available_(false), body_replaced_(false), - bytes_enqueued_(0) {} + partial_body_processed_(false) {} ProcessorState(const ProcessorState&) = delete; virtual ~ProcessorState() = default; ProcessorState& operator=(const ProcessorState&) = delete; @@ -70,6 +93,7 @@ class ProcessorState : public Logger::Loggable { void setHasNoBody(bool b) { no_body_ = b; } void setTrailersAvailable(bool d) { trailers_available_ = d; } bool bodyReplaced() const { return body_replaced_; } + bool partialBodyProcessed() const { return partial_body_processed_; } virtual void setProcessingMode( const envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode& mode) PURE; @@ -101,10 +125,18 @@ class ProcessorState : public Logger::Loggable { virtual void injectDataToFilterChain(Buffer::Instance& data, bool end_stream) PURE; virtual uint32_t bufferLimit() const PURE; + // Move the contents of "data" into a QueuedChunk object on the streaming queue. void enqueueStreamingChunk(Buffer::Instance& data, bool end_stream, bool delivered); - absl::optional dequeueStreamingChunk(bool undelivered_only); - bool queueOverHighLimit() const { return bytes_enqueued_ > bufferLimit(); } - bool queueBelowLowLimit() const { return bytes_enqueued_ < bufferLimit() / 2; } + // If the queue has chunks, return the head of the queue. + absl::optional dequeueStreamingChunk(bool undelivered_only) { + return chunk_queue_.pop(undelivered_only); + } + // Consolidate all the chunks on the queue into a single one and return a reference. 
+ const QueuedChunk& consolidateStreamedChunks(bool delivered) { + return chunk_queue_.consolidate(delivered); + } + bool queueOverHighLimit() const { return chunk_queue_.bytesEnqueued() > bufferLimit(); } + bool queueBelowLowLimit() const { return chunk_queue_.bytesEnqueued() < bufferLimit() / 2; } virtual Http::HeaderMap* addTrailers() PURE; @@ -141,6 +173,9 @@ class ProcessorState : public Logger::Loggable { bool trailers_available_ : 1; // If true, then a CONTINUE_AND_REPLACE status was used on a response bool body_replaced_ : 1; + // If true, we are in "buffered partial" mode and we already reached the buffer + // limit, sent the body in a message, and got back a reply. + bool partial_body_processed_ : 1; // If true, the server wants to see the headers bool send_headers_ : 1; @@ -153,10 +188,7 @@ class ProcessorState : public Logger::Loggable { Http::RequestOrResponseHeaderMap* headers_ = nullptr; Http::HeaderMap* trailers_ = nullptr; Event::TimerPtr message_timer_; - // A queue of chunks that were sent in streaming mode - std::deque chunks_for_processing_; - // The total size of chunks in the queue - uint32_t bytes_enqueued_; + ChunkQueue chunk_queue_; }; class DecodingProcessorState : public ProcessorState { diff --git a/source/extensions/filters/http/grpc_http1_reverse_bridge/config.cc b/source/extensions/filters/http/grpc_http1_reverse_bridge/config.cc index a5cbcde53c6a7..3f8421a428fdc 100644 --- a/source/extensions/filters/http/grpc_http1_reverse_bridge/config.cc +++ b/source/extensions/filters/http/grpc_http1_reverse_bridge/config.cc @@ -15,8 +15,8 @@ Http::FilterFactoryCb Config::createFilterFactoryFromProtoTyped( const envoy::extensions::filters::http::grpc_http1_reverse_bridge::v3::FilterConfig& config, const std::string&, Server::Configuration::FactoryContext&) { return [config](Envoy::Http::FilterChainFactoryCallbacks& callbacks) -> void { - callbacks.addStreamFilter( - std::make_shared(config.content_type(), config.withhold_grpc_frames())); + 
callbacks.addStreamFilter(std::make_shared( + config.content_type(), config.withhold_grpc_frames(), config.response_size_header())); }; } diff --git a/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc b/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc index 1137baf751f8d..307ff18771573 100644 --- a/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc +++ b/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.cc @@ -1,5 +1,6 @@ #include "source/extensions/filters/http/grpc_http1_reverse_bridge/filter.h" +#include "envoy/http/filter.h" #include "envoy/http/header_map.h" #include "source/common/common/enum_to_int.h" @@ -23,6 +24,13 @@ struct RcDetailsValues { const std::string GrpcBridgeFailedTooSmall = "grpc_bridge_data_too_small"; // The gRPC HTTP/1 bridge encountered an unsupported content type. const std::string GrpcBridgeFailedContentType = "grpc_bridge_content_type_wrong"; + // The gRPC HTTP/1 bridge expected the upstream to set a header indicating + // the content length, but it did not. + const std::string GrpcBridgeFailedMissingContentLength = "grpc_bridge_content_length_missing"; + // The gRPC HTTP/1 bridge expected the upstream to set a header indicating + // the content length, but it sent a value different than the actual response + // payload size. + const std::string GrpcBridgeFailedWrongContentLength = "grpc_bridge_content_length_wrong"; }; using RcDetails = ConstSingleton; @@ -144,9 +152,30 @@ Http::FilterHeadersStatus Filter::encodeHeaders(Http::ResponseHeaderMap& headers headers.setContentType(content_type_); if (withhold_grpc_frames_) { - // Adjust content-length to account for the frame header that's added. - adjustContentLength(headers, - [](auto length) { return length + Grpc::GRPC_FRAME_HEADER_SIZE; }); + // If the upstream should set a header indicating the response size, use that. 
+ if (response_size_header_) { + auto length_headers = headers.get(*response_size_header_); + if (!length_headers.empty() && + // In the case of repeated inline headers, we only use the first value. + absl::SimpleAtoi(length_headers[0]->value().getStringView().substr( + 0, length_headers[0]->value().getStringView().find(',')), + &response_message_length_)) { + headers.setContentLength(response_message_length_ + Grpc::GRPC_FRAME_HEADER_SIZE); + } else { + // If the response from upstream does not specify the content length, stand in an error + // message. + decoder_callbacks_->sendLocalReply( + Http::Code::OK, "envoy reverse bridge: upstream did not set content length", nullptr, + Grpc::Status::WellKnownGrpcStatus::Internal, + RcDetails::get().GrpcBridgeFailedMissingContentLength); + return Http::FilterHeadersStatus::StopIteration; + } + } else { + // If we are buffering the response, adjust content-length to account for the frame header + // that's added. + adjustContentLength(headers, + [](auto length) { return length + Grpc::GRPC_FRAME_HEADER_SIZE; }); + } } // We can only insert trailers at the end of data, so keep track of this value // until then. @@ -157,25 +186,46 @@ Http::FilterHeadersStatus Filter::encodeHeaders(Http::ResponseHeaderMap& headers } Http::FilterDataStatus Filter::encodeData(Buffer::Instance& buffer, bool end_stream) { + upstream_response_bytes_ += buffer.length(); if (!enabled_) { return Http::FilterDataStatus::Continue; } + // If we're getting the response size from an upstream header, we can stream the response. The + // first chunk of data we encode needs the gRPC frame header prepended. + if (withhold_grpc_frames_ && response_size_header_ && !frame_header_added_) { + buildGrpcFrameHeader(buffer, response_message_length_); + frame_header_added_ = true; + } + if (end_stream) { // Insert grpc-status trailers to communicate the error code. 
auto& trailers = encoder_callbacks_->addEncodedTrailers(); trailers.setGrpcStatus(grpc_status_); if (withhold_grpc_frames_) { - buffer.prepend(buffer_); - buildGrpcFrameHeader(buffer); + if (response_size_header_) { + if (upstream_response_bytes_ != response_message_length_) { + encoder_callbacks_->sendLocalReply( + Http::Code::OK, "envoy reverse bridge: upstream set incorrect content length", + nullptr, Grpc::Status::WellKnownGrpcStatus::Internal, + RcDetails::get().GrpcBridgeFailedWrongContentLength); + return Http::FilterDataStatus::StopIterationNoBuffer; + } + } else { + buffer.prepend(buffer_); + buildGrpcFrameHeader(buffer, buffer.length()); + } } return Http::FilterDataStatus::Continue; } - // We only need to buffer if we're responsible for injecting the gRPC frame header. if (withhold_grpc_frames_) { + if (response_size_header_) { + return Http::FilterDataStatus::Continue; + } + // Buffer the response in a mutable buffer: we need to determine the size of the response // and modify it later on. buffer_.move(buffer); @@ -192,21 +242,21 @@ Http::FilterTrailersStatus Filter::encodeTrailers(Http::ResponseTrailerMap& trai trailers.setGrpcStatus(grpc_status_); - if (withhold_grpc_frames_) { - buildGrpcFrameHeader(buffer_); + if (withhold_grpc_frames_ && !response_size_header_) { + buildGrpcFrameHeader(buffer_, buffer_.length()); encoder_callbacks_->addEncodedData(buffer_, false); } return Http::FilterTrailersStatus::Continue; } -void Filter::buildGrpcFrameHeader(Buffer::Instance& buffer) { +void Filter::buildGrpcFrameHeader(Buffer::Instance& buffer, uint32_t message_length) { // We do this even if the upstream failed: If the response returned non-200, // we'll respond with a grpc-status with an error, so clients will know that the request // was unsuccessful. Since we're guaranteed at this point to have a valid response // (unless upstream lied in content-type) we attempt to return a well-formed gRPC // response body. 
- Grpc::Encoder().prependFrameHeader(Grpc::GRPC_FH_DEFAULT, buffer); + Grpc::Encoder().prependFrameHeader(Grpc::GRPC_FH_DEFAULT, buffer, message_length); } } // namespace GrpcHttp1ReverseBridge diff --git a/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.h b/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.h index aed2ecdd34edf..723dba1f90962 100644 --- a/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.h +++ b/source/extensions/filters/http/grpc_http1_reverse_bridge/filter.h @@ -10,6 +10,8 @@ #include "source/common/grpc/status.h" #include "source/extensions/filters/http/common/pass_through_filter.h" +#include "absl/types/optional.h" + namespace Envoy { namespace Extensions { namespace HttpFilters { @@ -18,9 +20,13 @@ namespace GrpcHttp1ReverseBridge { // When enabled, will downgrade an incoming gRPC http request into a h/1.1 request. class Filter : public Envoy::Http::PassThroughFilter { public: - Filter(std::string upstream_content_type, bool withhold_grpc_frames) + Filter(std::string upstream_content_type, bool withhold_grpc_frames, + std::string response_size_header) : upstream_content_type_(std::move(upstream_content_type)), - withhold_grpc_frames_(withhold_grpc_frames) {} + withhold_grpc_frames_(withhold_grpc_frames), + response_size_header_(!response_size_header.empty() + ? 
absl::make_optional(Http::LowerCaseString(response_size_header)) + : absl::nullopt) {} // Http::StreamDecoderFilter Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers, bool end_stream) override; @@ -34,13 +40,23 @@ class Filter : public Envoy::Http::PassThroughFilter { private: // Prepend the grpc frame into the buffer - void buildGrpcFrameHeader(Buffer::Instance& buffer); + void buildGrpcFrameHeader(Buffer::Instance& buffer, uint32_t message_length); const std::string upstream_content_type_; const bool withhold_grpc_frames_; + const absl::optional response_size_header_; bool enabled_{}; bool prefix_stripped_{}; + + // Tracking state for gRPC frame status when withholding gRPC frames from the + // upstream and streaming responses. + bool frame_header_added_{}; + // The content length reported by the upstream. + uint32_t response_message_length_{}; + // The actual size of the response returned by the upstream so far. + uint32_t upstream_response_bytes_{}; + std::string content_type_{}; Grpc::Status::GrpcStatus grpc_status_{}; // Normally we'd use the encoding buffer, but since we need to mutate the diff --git a/source/extensions/filters/http/health_check/BUILD b/source/extensions/filters/http/health_check/BUILD index 3a6077d479d14..1ecafee6b332c 100644 --- a/source/extensions/filters/http/health_check/BUILD +++ b/source/extensions/filters/http/health_check/BUILD @@ -39,7 +39,7 @@ envoy_cc_extension( hdrs = ["config.h"], # Legacy test use. TODO(#9953) clean up. 
extra_visibility = [ - "//test/common/filter/http:__subpackages__", + "//test/common/filter:__subpackages__", "//test/integration:__subpackages__", "//test/server:__subpackages__", ], diff --git a/source/extensions/filters/http/jwt_authn/BUILD b/source/extensions/filters/http/jwt_authn/BUILD index 50f5952b2f707..915011c82f4b8 100644 --- a/source/extensions/filters/http/jwt_authn/BUILD +++ b/source/extensions/filters/http/jwt_authn/BUILD @@ -16,6 +16,7 @@ envoy_cc_library( deps = [ "//source/common/http:header_utility_lib", "//source/common/http:utility_lib", + "@com_google_absl//absl/container:btree", "@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/http/jwt_authn/extractor.cc b/source/extensions/filters/http/jwt_authn/extractor.cc index 8b78af98d0a9b..bfa03f0ac4f05 100644 --- a/source/extensions/filters/http/jwt_authn/extractor.cc +++ b/source/extensions/filters/http/jwt_authn/extractor.cc @@ -10,7 +10,7 @@ #include "source/common/http/utility.h" #include "source/common/singleton/const_singleton.h" -#include "absl/container/node_hash_set.h" +#include "absl/container/btree_map.h" #include "absl/strings/match.h" using envoy::extensions::filters::http::jwt_authn::v3::JwtProvider; @@ -111,6 +111,18 @@ class JwtParamLocation : public JwtLocationBase { } }; +// The JwtLocation for cookie extraction. +class JwtCookieLocation : public JwtLocationBase { +public: + JwtCookieLocation(const std::string& token, const JwtIssuerChecker& issuer_checker) + : JwtLocationBase(token, issuer_checker) {} + + void removeJwt(Http::HeaderMap&) const override { + // TODO(theshubhamp): remove JWT from cookies. 
+ NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + } +}; + /** * The class implements Extractor interface * @@ -133,6 +145,8 @@ class ExtractorImpl : public Logger::Loggable, public Extractor const std::string& value_prefix); // add a query param config void addQueryParamConfig(const std::string& issuer, const std::string& param); + // add a cookie config + void addCookieConfig(const std::string& issuer, const std::string& cookie); // ctor helper for a jwt provider config void addProvider(const JwtProvider& provider); @@ -164,6 +178,14 @@ class ExtractorImpl : public Logger::Loggable, public Extractor // The map of a parameter key to set of issuers specified the parameter std::map param_locations_; + // CookieMap value type to store issuers that specified this cookie. + struct CookieLocationSpec { + // Issuers that specified this cookie. + JwtIssuerChecker issuer_checker_; + }; + // The map of a cookie key to set of issuers specified the cookie. + absl::btree_map cookie_locations_; + std::vector forward_payload_headers_; }; @@ -183,6 +205,9 @@ void ExtractorImpl::addProvider(const JwtProvider& provider) { for (const std::string& param : provider.from_params()) { addQueryParamConfig(provider.issuer(), param); } + for (const std::string& cookie : provider.from_cookies()) { + addCookieConfig(provider.issuer(), cookie); + } // If not specified, use default locations.
if (provider.from_headers().empty() && provider.from_params().empty()) { addHeaderConfig(provider.issuer(), Http::CustomHeaders::get().Authorization, @@ -210,6 +235,11 @@ void ExtractorImpl::addQueryParamConfig(const std::string& issuer, const std::st param_location_spec.issuer_checker_.add(issuer); } +void ExtractorImpl::addCookieConfig(const std::string& issuer, const std::string& cookie) { + auto& cookie_location_spec = cookie_locations_[cookie]; + cookie_location_spec.issuer_checker_.add(issuer); +} + std::vector ExtractorImpl::extract(const Http::RequestHeaderMap& headers) const { std::vector tokens; @@ -235,22 +265,37 @@ ExtractorImpl::extract(const Http::RequestHeaderMap& headers) const { } } - // If no query parameter locations specified, or Path() is null, bail out - if (param_locations_.empty() || headers.Path() == nullptr) { - return tokens; + // Check query parameter locations only if query parameter locations specified and Path() is not + // null + if (!param_locations_.empty() && headers.Path() != nullptr) { + const auto& params = Http::Utility::parseAndDecodeQueryString(headers.getPathValue()); + for (const auto& location_it : param_locations_) { + const auto& param_key = location_it.first; + const auto& location_spec = location_it.second; + const auto& it = params.find(param_key); + if (it != params.end()) { + tokens.push_back(std::make_unique( + it->second, location_spec.issuer_checker_, param_key)); + } + } } - // Check query parameter locations. - const auto& params = Http::Utility::parseAndDecodeQueryString(headers.getPathValue()); - for (const auto& location_it : param_locations_) { - const auto& param_key = location_it.first; - const auto& location_spec = location_it.second; - const auto& it = params.find(param_key); - if (it != params.end()) { - tokens.push_back(std::make_unique( - it->second, location_spec.issuer_checker_, param_key)); + // Check cookie locations. 
+ if (!cookie_locations_.empty()) { + const auto& cookies = Http::Utility::parseCookies( + headers, [&](absl::string_view k) -> bool { return cookie_locations_.contains(k); }); + + for (const auto& location_it : cookie_locations_) { + const auto& cookie_key = location_it.first; + const auto& location_spec = location_it.second; + const auto& it = cookies.find(cookie_key); + if (it != cookies.end()) { + tokens.push_back( + std::make_unique(it->second, location_spec.issuer_checker_)); + } } } + return tokens; } diff --git a/source/extensions/filters/http/lua/lua_filter.cc b/source/extensions/filters/http/lua/lua_filter.cc index 8ff3ded5a54e6..5c78826978676 100644 --- a/source/extensions/filters/http/lua/lua_filter.cc +++ b/source/extensions/filters/http/lua/lua_filter.cc @@ -50,10 +50,10 @@ bool allowDeprecatedMetadataName() { } const ProtobufWkt::Struct& getMetadata(Http::StreamFilterCallbacks* callbacks) { - if (callbacks->route() == nullptr || callbacks->route()->routeEntry() == nullptr) { + if (callbacks->route() == nullptr) { return ProtobufWkt::Struct::default_instance(); } - const auto& metadata = callbacks->route()->routeEntry()->metadata(); + const auto& metadata = callbacks->route()->metadata(); { const auto& filter_it = metadata.filter_metadata().find("envoy.filters.http.lua"); diff --git a/source/extensions/filters/http/lua/wrappers.cc b/source/extensions/filters/http/lua/wrappers.cc index bf70a111e2180..3f2c02a2fe5da 100644 --- a/source/extensions/filters/http/lua/wrappers.cc +++ b/source/extensions/filters/http/lua/wrappers.cc @@ -114,7 +114,7 @@ int StreamInfoWrapper::luaDynamicMetadata(lua_State* state) { } int StreamInfoWrapper::luaDownstreamSslConnection(lua_State* state) { - const auto& ssl = stream_info_.downstreamSslConnection(); + const auto& ssl = stream_info_.downstreamAddressProvider().sslConnection(); if (ssl != nullptr) { if (downstream_ssl_connection_.get() != nullptr) { downstream_ssl_connection_.pushStack(); diff --git 
a/source/extensions/filters/http/oauth2/BUILD b/source/extensions/filters/http/oauth2/BUILD index e0d8962aca9cb..1d2ba1439de7d 100644 --- a/source/extensions/filters/http/oauth2/BUILD +++ b/source/extensions/filters/http/oauth2/BUILD @@ -67,7 +67,6 @@ envoy_cc_extension( ":oauth_lib", "//envoy/registry", "//source/extensions/filters/http/common:factory_base_lib", - "@envoy_api//envoy/api/v2/auth:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/http/oauth2/v3alpha:pkg_cc_proto", ], ) diff --git a/source/extensions/filters/http/oauth2/config.cc b/source/extensions/filters/http/oauth2/config.cc index ef8cfc364cb37..74bc5567504ad 100644 --- a/source/extensions/filters/http/oauth2/config.cc +++ b/source/extensions/filters/http/oauth2/config.cc @@ -4,7 +4,6 @@ #include #include -#include "envoy/api/v2/auth/secret.pb.h" #include "envoy/common/exception.h" #include "envoy/extensions/filters/http/oauth2/v3alpha/oauth.pb.validate.h" #include "envoy/registry/registry.h" diff --git a/source/extensions/filters/http/oauth2/filter.cc b/source/extensions/filters/http/oauth2/filter.cc index 8a42db24cd90e..d0234dd733b09 100644 --- a/source/extensions/filters/http/oauth2/filter.cc +++ b/source/extensions/filters/http/oauth2/filter.cc @@ -107,6 +107,12 @@ std::string encodeResourceList(const Protobuf::RepeatedPtrField& re void setBearerToken(Http::RequestHeaderMap& headers, const std::string& token) { headers.setInline(authorization_handle.handle(), absl::StrCat("Bearer ", token)); } + +std::string findValue(const absl::flat_hash_map& map, + const std::string& key) { + const auto value_it = map.find(key); + return value_it != map.end() ? 
value_it->second : EMPTY_STRING; +} } // namespace FilterConfig::FilterConfig( @@ -138,9 +144,13 @@ FilterStats FilterConfig::generateStats(const std::string& prefix, Stats::Scope& void OAuth2CookieValidator::setParams(const Http::RequestHeaderMap& headers, const std::string& secret) { - expires_ = Http::Utility::parseCookieValue(headers, "OauthExpires"); - token_ = Http::Utility::parseCookieValue(headers, "BearerToken"); - hmac_ = Http::Utility::parseCookieValue(headers, "OauthHMAC"); + const auto& cookies = Http::Utility::parseCookies(headers, [](absl::string_view key) -> bool { + return key == "OauthExpires" || key == "BearerToken" || key == "OauthHMAC"; + }); + + expires_ = findValue(cookies, "OauthExpires"); + token_ = findValue(cookies, "BearerToken"); + hmac_ = findValue(cookies, "OauthHMAC"); host_ = headers.Host()->value().getStringView(); secret_.assign(secret.begin(), secret.end()); diff --git a/source/extensions/filters/http/ratelimit/config.cc b/source/extensions/filters/http/ratelimit/config.cc index 0db542d17a9ac..b49ea57da2419 100644 --- a/source/extensions/filters/http/ratelimit/config.cc +++ b/source/extensions/filters/http/ratelimit/config.cc @@ -27,14 +27,12 @@ Http::FilterFactoryCb RateLimitFilterConfig::createFilterFactoryFromProtoTyped( const std::chrono::milliseconds timeout = std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(proto_config, timeout, 20)); + Config::Utility::checkTransportVersion(proto_config.rate_limit_service()); return [proto_config, &context, timeout, - transport_version = - Config::Utility::getAndCheckTransportVersion(proto_config.rate_limit_service()), filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void { callbacks.addStreamFilter(std::make_shared( filter_config, Filters::Common::RateLimit::rateLimitClient( - context, proto_config.rate_limit_service().grpc_service(), timeout, - transport_version))); + context, proto_config.rate_limit_service().grpc_service(), timeout))); }; } diff --git 
a/source/extensions/filters/http/rbac/rbac_filter.cc b/source/extensions/filters/http/rbac/rbac_filter.cc index 5b385758ec3d9..da1d64d1f520a 100644 --- a/source/extensions/filters/http/rbac/rbac_filter.cc +++ b/source/extensions/filters/http/rbac/rbac_filter.cc @@ -46,7 +46,7 @@ RoleBasedAccessControlFilter::decodeHeaders(Http::RequestHeaderMap& headers, boo "checking request: requestedServerName: {}, sourceIP: {}, directRemoteIP: {}, remoteIP: {}," "localAddress: {}, ssl: {}, headers: {}, dynamicMetadata: {}", callbacks_->connection()->requestedServerName(), - callbacks_->connection()->addressProvider().remoteAddress()->asString(), + callbacks_->connection()->connectionInfoProvider().remoteAddress()->asString(), callbacks_->streamInfo().downstreamAddressProvider().directRemoteAddress()->asString(), callbacks_->streamInfo().downstreamAddressProvider().remoteAddress()->asString(), callbacks_->streamInfo().downstreamAddressProvider().localAddress()->asString(), diff --git a/source/extensions/filters/listener/original_dst/original_dst.cc b/source/extensions/filters/listener/original_dst/original_dst.cc index 1b737c2c2a1a4..ccb0f93a5ff06 100644 --- a/source/extensions/filters/listener/original_dst/original_dst.cc +++ b/source/extensions/filters/listener/original_dst/original_dst.cc @@ -64,7 +64,7 @@ Network::FilterStatus OriginalDstFilter::onAccept(Network::ListenerFilterCallbac } #endif // Restore the local address to the original one. 
- socket.addressProvider().restoreLocalAddress(original_local_address); + socket.connectionInfoProvider().restoreLocalAddress(original_local_address); } } diff --git a/source/extensions/filters/listener/original_src/original_src.cc b/source/extensions/filters/listener/original_src/original_src.cc index eefba846fcc8f..e768d3127d277 100644 --- a/source/extensions/filters/listener/original_src/original_src.cc +++ b/source/extensions/filters/listener/original_src/original_src.cc @@ -15,7 +15,7 @@ OriginalSrcFilter::OriginalSrcFilter(const Config& config) : config_(config) {} Network::FilterStatus OriginalSrcFilter::onAccept(Network::ListenerFilterCallbacks& cb) { auto& socket = cb.socket(); - auto address = socket.addressProvider().remoteAddress(); + auto address = socket.connectionInfoProvider().remoteAddress(); ASSERT(address); ENVOY_LOG(debug, diff --git a/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc b/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc index 6e9fa236f38d0..0d8082a079fe2 100644 --- a/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc +++ b/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc @@ -127,10 +127,12 @@ ReadOrParseState Filter::onReadWorker() { // Only set the local address if it really changed, and mark it as address being restored. if (*proxy_protocol_header_.value().local_address_ != - *socket.addressProvider().localAddress()) { - socket.addressProvider().restoreLocalAddress(proxy_protocol_header_.value().local_address_); + *socket.connectionInfoProvider().localAddress()) { + socket.connectionInfoProvider().restoreLocalAddress( + proxy_protocol_header_.value().local_address_); } - socket.addressProvider().setRemoteAddress(proxy_protocol_header_.value().remote_address_); + socket.connectionInfoProvider().setRemoteAddress( + proxy_protocol_header_.value().remote_address_); } // Release the file event so that we do not interfere with the connection read events. 
diff --git a/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.cc b/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.cc index 1fb12918e1f7c..669cc315ed3e0 100644 --- a/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.cc +++ b/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.cc @@ -113,7 +113,7 @@ void ClientSslAuthFilter::onEvent(Network::ConnectionEvent event) { ASSERT(read_callbacks_->connection().ssl()); if (config_->ipAllowlist().contains( - *read_callbacks_->connection().addressProvider().remoteAddress())) { + *read_callbacks_->connection().connectionInfoProvider().remoteAddress())) { config_->stats().auth_ip_allowlist_.inc(); read_callbacks_->continueReading(); return; diff --git a/source/extensions/filters/network/dubbo_proxy/active_message.cc b/source/extensions/filters/network/dubbo_proxy/active_message.cc index 8366759269eaa..e828884ff26fe 100644 --- a/source/extensions/filters/network/dubbo_proxy/active_message.cc +++ b/source/extensions/filters/network/dubbo_proxy/active_message.cc @@ -140,7 +140,6 @@ void ActiveMessageDecoderFilter::continueDecoding() { // If the filter stack was paused during messageEnd, handle end-of-request details. 
parent_.finalizeRequest(); } - parent_.continueDecoding(); } } @@ -185,7 +184,7 @@ ActiveMessage::ActiveMessage(ConnectionManager& parent) : parent_(parent), request_timer_(std::make_unique( parent_.stats().request_time_ms_, parent.timeSystem())), request_id_(-1), stream_id_(parent.randomGenerator().random()), - stream_info_(parent.timeSystem(), parent_.connection().addressProviderSharedPtr()), + stream_info_(parent.timeSystem(), parent_.connection().connectionInfoProviderSharedPtr()), pending_stream_decoded_(false), local_response_sent_(false) { parent_.stats().request_active_.inc(); } @@ -414,8 +413,6 @@ uint64_t ActiveMessage::requestId() const { uint64_t ActiveMessage::streamId() const { return stream_id_; } -void ActiveMessage::continueDecoding() { parent_.continueDecoding(); } - SerializationType ActiveMessage::serializationType() const { return parent_.downstreamSerializationType(); } diff --git a/source/extensions/filters/network/dubbo_proxy/active_message.h b/source/extensions/filters/network/dubbo_proxy/active_message.h index 9a043d8656d2b..5e860be3ddc23 100644 --- a/source/extensions/filters/network/dubbo_proxy/active_message.h +++ b/source/extensions/filters/network/dubbo_proxy/active_message.h @@ -133,7 +133,6 @@ using ActiveMessageEncoderFilterPtr = std::unique_ptr, public Event::DeferredDeletable, public StreamHandler, - public DubboFilters::DecoderFilterCallbacks, public DubboFilters::FilterChainFactoryCallbacks, Logger::Loggable { public: @@ -158,21 +157,19 @@ class ActiveMessage : public LinkedObject, // StreamHandler void onStreamDecoded(MessageMetadataSharedPtr metadata, ContextSharedPtr ctx) override; - // DubboFilters::DecoderFilterCallbacks - uint64_t requestId() const override; - uint64_t streamId() const override; - const Network::Connection* connection() const override; - void continueDecoding() override; - SerializationType serializationType() const override; - ProtocolType protocolType() const override; - StreamInfo::StreamInfo& 
streamInfo() override; - Router::RouteConstSharedPtr route() override; - void sendLocalReply(const DubboFilters::DirectResponse& response, bool end_stream) override; - void startUpstreamResponse() override; - DubboFilters::UpstreamResponseStatus upstreamData(Buffer::Instance& buffer) override; - void resetDownstreamConnection() override; - Event::Dispatcher& dispatcher() override; - void resetStream() override; + uint64_t requestId() const; + uint64_t streamId() const; + const Network::Connection* connection() const; + SerializationType serializationType() const; + ProtocolType protocolType() const; + StreamInfo::StreamInfo& streamInfo(); + Router::RouteConstSharedPtr route(); + void sendLocalReply(const DubboFilters::DirectResponse& response, bool end_stream); + void startUpstreamResponse(); + DubboFilters::UpstreamResponseStatus upstreamData(Buffer::Instance& buffer); + void resetDownstreamConnection(); + Event::Dispatcher& dispatcher(); + void resetStream(); void createFilterChain(); FilterStatus applyDecoderFilters(ActiveMessageDecoderFilter* filter, diff --git a/source/extensions/filters/network/dubbo_proxy/config.cc b/source/extensions/filters/network/dubbo_proxy/config.cc index 4743f7e06064f..8bc19cd46663a 100644 --- a/source/extensions/filters/network/dubbo_proxy/config.cc +++ b/source/extensions/filters/network/dubbo_proxy/config.cc @@ -144,7 +144,6 @@ void ConfigImpl::registerFilter(const DubboFilterConfig& proto_config) { string_name); ProtobufTypes::MessagePtr message = factory.createEmptyConfigProto(); Envoy::Config::Utility::translateOpaqueConfig(proto_config.config(), - ProtobufWkt::Struct::default_instance(), context_.messageValidationVisitor(), *message); DubboFilters::FilterFactoryCb callback = factory.createFilterFactoryFromProto(*message, stats_prefix_, context_); diff --git a/source/extensions/filters/network/dubbo_proxy/conn_manager.cc b/source/extensions/filters/network/dubbo_proxy/conn_manager.cc index 3e259a18a6cb7..1806fd82da06c 100644 --- 
a/source/extensions/filters/network/dubbo_proxy/conn_manager.cc +++ b/source/extensions/filters/network/dubbo_proxy/conn_manager.cc @@ -29,20 +29,7 @@ Network::FilterStatus ConnectionManager::onData(Buffer::Instance& data, bool end dispatch(); if (end_stream) { - ENVOY_CONN_LOG(trace, "downstream half-closed", read_callbacks_->connection()); - - // Downstream has closed. Unless we're waiting for an upstream connection to complete a oneway - // request, close. The special case for oneway requests allows them to complete before the - // ConnectionManager is destroyed. - if (stopped_) { - ASSERT(!active_message_list_.empty()); - auto metadata = (*active_message_list_.begin())->metadata(); - if (metadata && metadata->messageType() == MessageType::Oneway) { - ENVOY_CONN_LOG(trace, "waiting for one-way completion", read_callbacks_->connection()); - half_closed_ = true; - return Network::FilterStatus::StopIteration; - } - } + ENVOY_CONN_LOG(trace, "downstream closed", read_callbacks_->connection()); ENVOY_LOG(debug, "dubbo: end data processing"); resetAllMessages(false); @@ -110,11 +97,6 @@ void ConnectionManager::dispatch() { return; } - if (stopped_) { - ENVOY_CONN_LOG(debug, "dubbo: dubbo filter stopped", read_callbacks_->connection()); - return; - } - try { bool underflow = false; while (!underflow) { @@ -166,19 +148,6 @@ void ConnectionManager::sendLocalReply(MessageMetadata& metadata, } } -void ConnectionManager::continueDecoding() { - ENVOY_CONN_LOG(debug, "dubbo filter continued", read_callbacks_->connection()); - stopped_ = false; - dispatch(); - - if (!stopped_ && half_closed_) { - // If we're half closed, but not stopped waiting for an upstream, - // reset any pending rpcs and close the connection. 
- resetAllMessages(false); - read_callbacks_->connection().close(Network::ConnectionCloseType::FlushWrite); - } -} - void ConnectionManager::deferredMessage(ActiveMessage& message) { if (!message.inserted()) { return; diff --git a/source/extensions/filters/network/dubbo_proxy/conn_manager.h b/source/extensions/filters/network/dubbo_proxy/conn_manager.h index 827f74571f8fb..5824583141344 100644 --- a/source/extensions/filters/network/dubbo_proxy/conn_manager.h +++ b/source/extensions/filters/network/dubbo_proxy/conn_manager.h @@ -72,7 +72,6 @@ class ConnectionManager : public Network::ReadFilter, SerializationType downstreamSerializationType() const { return protocol_->serializer()->type(); } ProtocolType downstreamProtocolType() const { return protocol_->type(); } - void continueDecoding(); void deferredMessage(ActiveMessage& message); void sendLocalReply(MessageMetadata& metadata, const DubboFilters::DirectResponse& response, bool end_stream); @@ -87,9 +86,6 @@ class ConnectionManager : public Network::ReadFilter, Buffer::OwnedImpl request_buffer_; std::list active_message_list_; - bool stopped_{false}; - bool half_closed_{false}; - Config& config_; TimeSource& time_system_; DubboFilterStats& stats_; diff --git a/source/extensions/filters/network/dubbo_proxy/router/route_matcher.h b/source/extensions/filters/network/dubbo_proxy/router/route_matcher.h index 1a36230b8c5b3..f19f7b9d13eb6 100644 --- a/source/extensions/filters/network/dubbo_proxy/router/route_matcher.h +++ b/source/extensions/filters/network/dubbo_proxy/router/route_matcher.h @@ -123,7 +123,7 @@ class MethodRouteEntryImpl : public RouteEntryImplBase { uint64_t random_value) const override; private: - const Matchers::StringMatcherImpl method_name_; + const Matchers::StringMatcherImpl method_name_; std::shared_ptr parameter_route_; }; diff --git a/source/extensions/filters/network/ext_authz/config.cc b/source/extensions/filters/network/ext_authz/config.cc index ce3bf91c9b6dc..f2e074a4c64a5 100644 --- 
a/source/extensions/filters/network/ext_authz/config.cc +++ b/source/extensions/filters/network/ext_authz/config.cc @@ -27,16 +27,16 @@ Network::FilterFactoryCb ExtAuthzConfigFactory::createFilterFactoryFromProtoType proto_config, context.scope(), context.getServerFactoryContext().bootstrap()); const uint32_t timeout_ms = PROTOBUF_GET_MS_OR_DEFAULT(proto_config.grpc_service(), timeout, 200); + Envoy::Config::Utility::checkTransportVersion(proto_config); return [grpc_service = proto_config.grpc_service(), &context, ext_authz_config, - transport_api_version = Envoy::Config::Utility::getAndCheckTransportVersion(proto_config), timeout_ms](Network::FilterManager& filter_manager) -> void { auto async_client_factory = context.clusterManager().grpcAsyncClientManager().factoryForGrpcService( grpc_service, context.scope(), true); auto client = std::make_unique( - async_client_factory->createUncachedRawAsyncClient(), std::chrono::milliseconds(timeout_ms), - transport_api_version); + async_client_factory->createUncachedRawAsyncClient(), + std::chrono::milliseconds(timeout_ms)); filter_manager.addReadFilter(Network::ReadFilterSharedPtr{ std::make_shared(ext_authz_config, std::move(client))}); }; diff --git a/source/extensions/filters/network/ext_authz/ext_authz.cc b/source/extensions/filters/network/ext_authz/ext_authz.cc index 670f77a995646..b59a2bf04a4c8 100644 --- a/source/extensions/filters/network/ext_authz/ext_authz.cc +++ b/source/extensions/filters/network/ext_authz/ext_authz.cc @@ -83,6 +83,11 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) { break; } + if (!response->dynamic_metadata.fields().empty()) { + filter_callbacks_->connection().streamInfo().setDynamicMetadata( + NetworkFilterNames::get().ExtAuthorization, response->dynamic_metadata); + } + // Fail open only if configured to do so and if the check status was a error. 
if (response->status == Filters::Common::ExtAuthz::CheckStatus::Denied || (response->status == Filters::Common::ExtAuthz::CheckStatus::Error && @@ -98,11 +103,6 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) { config_->stats().failure_mode_allowed_.inc(); } - if (!response->dynamic_metadata.fields().empty()) { - filter_callbacks_->connection().streamInfo().setDynamicMetadata( - NetworkFilterNames::get().ExtAuthorization, response->dynamic_metadata); - } - // We can get completion inline, so only call continue if that isn't happening. if (!calling_check_) { filter_callbacks_->continueReading(); diff --git a/source/extensions/filters/network/http_connection_manager/BUILD b/source/extensions/filters/network/http_connection_manager/BUILD index 5751aa3967848..e4945a15788e4 100644 --- a/source/extensions/filters/network/http_connection_manager/BUILD +++ b/source/extensions/filters/network/http_connection_manager/BUILD @@ -36,7 +36,7 @@ envoy_cc_extension( "//source/common/access_log:access_log_lib", "//source/common/common:minimal_logger_lib", "//source/common/config:utility_lib", - "//source/common/filter/http:filter_config_discovery_lib", + "//source/common/filter:config_discovery_lib", "//source/common/http:conn_manager_lib", "//source/common/http:default_server_string_lib", "//source/common/http:request_id_extension_lib", diff --git a/source/extensions/filters/network/http_connection_manager/config.cc b/source/extensions/filters/network/http_connection_manager/config.cc index d1ebbddaea137..53223b12160c3 100644 --- a/source/extensions/filters/network/http_connection_manager/config.cc +++ b/source/extensions/filters/network/http_connection_manager/config.cc @@ -20,7 +20,7 @@ #include "source/common/access_log/access_log_impl.h" #include "source/common/common/fmt.h" #include "source/common/config/utility.h" -#include "source/common/filter/http/filter_config_discovery_impl.h" +#include "source/common/filter/config_discovery_impl.h" 
#include "source/common/http/conn_manager_config.h" #include "source/common/http/conn_manager_utility.h" #include "source/common/http/default_server_string.h" @@ -176,10 +176,10 @@ Utility::Singletons Utility::createSingletons(Server::Configuration::FactoryCont context.getServerFactoryContext(), context.messageValidationVisitor())); }); - std::shared_ptr filter_config_provider_manager = - context.singletonManager().getTyped( + std::shared_ptr filter_config_provider_manager = + context.singletonManager().getTyped( SINGLETON_MANAGER_REGISTERED_NAME(filter_config_provider_manager), - [] { return std::make_shared(); }); + [] { return std::make_shared(); }); return {date_provider, route_config_provider_manager, scoped_routes_config_provider_manager, http_tracer_manager, filter_config_provider_manager}; @@ -192,7 +192,7 @@ std::shared_ptr Utility::createConfig( Router::RouteConfigProviderManager& route_config_provider_manager, Config::ConfigProviderManager& scoped_routes_config_provider_manager, Tracing::HttpTracerManager& http_tracer_manager, - Filter::Http::FilterConfigProviderManager& filter_config_provider_manager) { + FilterConfigProviderManager& filter_config_provider_manager) { return std::make_shared( proto_config, context, date_provider, route_config_provider_manager, scoped_routes_config_provider_manager, http_tracer_manager, filter_config_provider_manager); @@ -263,7 +263,7 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig( Router::RouteConfigProviderManager& route_config_provider_manager, Config::ConfigProviderManager& scoped_routes_config_provider_manager, Tracing::HttpTracerManager& http_tracer_manager, - Filter::Http::FilterConfigProviderManager& filter_config_provider_manager) + FilterConfigProviderManager& filter_config_provider_manager) : context_(context), stats_prefix_(fmt::format("http.{}.", config.stat_prefix())), stats_(Http::ConnectionManagerImpl::generateStats(stats_prefix_, context_.scope())), tracing_stats_( @@ -373,6 +373,16 @@ 
HttpConnectionManagerConfig::HttpConnectionManagerConfig( auto* extension = ip_detection_extensions.Add(); extension->set_name("envoy.http.original_ip_detection.xff"); extension->mutable_typed_config()->PackFrom(xff_config); + } else { + if (use_remote_address_) { + throw EnvoyException( + "Original IP detection extensions and use_remote_address may not be mixed"); + } + + if (xff_num_trusted_hops_ > 0) { + throw EnvoyException( + "Original IP detection extensions and xff_num_trusted_hops may not be mixed"); + } } original_ip_detection_extensions_.reserve(ip_detection_extensions.size()); @@ -469,18 +479,8 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig( // Listener level traffic direction overrides the operation name switch (context.direction()) { case envoy::config::core::v3::UNSPECIFIED: { - switch (tracing_config.hidden_envoy_deprecated_operation_name()) { - case envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager:: - Tracing::INGRESS: - tracing_operation_name = Tracing::OperationName::Ingress; - break; - case envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager:: - Tracing::EGRESS: - tracing_operation_name = Tracing::OperationName::Egress; - break; - default: - NOT_REACHED_GCOVR_EXCL_LINE; - } + // Continuing legacy behavior; if unspecified, we treat this as ingress. 
+ tracing_operation_name = Tracing::OperationName::Ingress; break; } case envoy::config::core::v3::INBOUND: @@ -494,13 +494,6 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig( } Tracing::CustomTagMap custom_tags; - for (const std::string& header : - tracing_config.hidden_envoy_deprecated_request_headers_for_tags()) { - envoy::type::tracing::v3::CustomTag::Header headerTag; - headerTag.set_name(header); - custom_tags.emplace( - header, std::make_shared(header, headerTag)); - } for (const auto& tag : tracing_config.custom_tags()) { custom_tags.emplace(tag.tag(), Tracing::HttpTracerUtility::createCustomTag(tag)); } @@ -565,6 +558,9 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig( HTTP3: #ifdef ENVOY_ENABLE_QUIC codec_type_ = CodecType::HTTP3; + if (!context_.isQuicListener()) { + throw EnvoyException("HTTP/3 codec configured on non-QUIC listener."); + } #else throw EnvoyException("HTTP3 configured but not enabled in the build."); #endif @@ -572,6 +568,9 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig( default: NOT_REACHED_GCOVR_EXCL_LINE; } + if (codec_type_ != CodecType::HTTP3 && context_.isQuicListener()) { + throw EnvoyException("Non-HTTP/3 codec configured on QUIC listener."); + } const auto& filters = config.http_filters(); DependencyManager dependency_manager; @@ -655,11 +654,7 @@ void HttpConnectionManagerConfig::processFilter( ENVOY_LOG(debug, " name: {}", filter_config_provider->name()); ENVOY_LOG(debug, " config: {}", MessageUtil::getJsonStringFromMessageOrError( - proto_config.has_typed_config() - ? 
static_cast(proto_config.typed_config()) - : static_cast( - proto_config.hidden_envoy_deprecated_config()), - true)); + static_cast(proto_config.typed_config()), true)); filter_factories.push_back(std::move(filter_config_provider)); } diff --git a/source/extensions/filters/network/http_connection_manager/config.h b/source/extensions/filters/network/http_connection_manager/config.h index 4cf54d0618a8d..e480599fefd98 100644 --- a/source/extensions/filters/network/http_connection_manager/config.h +++ b/source/extensions/filters/network/http_connection_manager/config.h @@ -11,7 +11,7 @@ #include "envoy/config/core/v3/extension.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.validate.h" -#include "envoy/filter/http/filter_config_provider.h" +#include "envoy/filter/config_provider_manager.h" #include "envoy/http/filter.h" #include "envoy/http/original_ip_detection.h" #include "envoy/http/request_id_extension.h" @@ -39,6 +39,8 @@ namespace Extensions { namespace NetworkFilters { namespace HttpConnectionManager { +using FilterConfigProviderManager = Filter::FilterConfigProviderManager; + /** * Config registration for the HTTP connection manager filter. @see NamedNetworkFilterConfigFactory. 
*/ @@ -73,7 +75,7 @@ class MobileHttpConnectionManagerFilterConfigFactory EnvoyMobileHttpConnectionManager> { public: MobileHttpConnectionManagerFilterConfigFactory() - : FactoryBase(NetworkFilterNames::get().HttpConnectionManager, true) {} + : FactoryBase(NetworkFilterNames::get().EnvoyMobileHttpConnectionManager, true) {} private: Network::FilterFactoryCb createFilterFactoryFromProtoTyped( @@ -119,11 +121,11 @@ class HttpConnectionManagerConfig : Logger::Loggable, Router::RouteConfigProviderManager& route_config_provider_manager, Config::ConfigProviderManager& scoped_routes_config_provider_manager, Tracing::HttpTracerManager& http_tracer_manager, - Filter::Http::FilterConfigProviderManager& filter_config_provider_manager); + FilterConfigProviderManager& filter_config_provider_manager); // Http::FilterChainFactory void createFilterChain(Http::FilterChainFactoryCallbacks& callbacks) override; - using FilterFactoriesList = std::list; + using FilterFactoriesList = std::list; struct FilterConfig { std::unique_ptr filter_factories; bool allow_upgrade; @@ -264,7 +266,7 @@ class HttpConnectionManagerConfig : Logger::Loggable, std::vector set_current_client_cert_details_; Router::RouteConfigProviderManager& route_config_provider_manager_; Config::ConfigProviderManager& scoped_routes_config_provider_manager_; - Filter::Http::FilterConfigProviderManager& filter_config_provider_manager_; + FilterConfigProviderManager& filter_config_provider_manager_; CodecType codec_type_; envoy::config::core::v3::Http3ProtocolOptions http3_options_; envoy::config::core::v3::Http2ProtocolOptions http2_options_; @@ -337,7 +339,7 @@ class Utility { Router::RouteConfigProviderManagerSharedPtr route_config_provider_manager_; Router::ScopedRoutesConfigProviderManagerSharedPtr scoped_routes_config_provider_manager_; Tracing::HttpTracerManagerSharedPtr http_tracer_manager_; - std::shared_ptr filter_config_provider_manager_; + std::shared_ptr filter_config_provider_manager_; }; /** @@ -365,7 +367,7 
@@ class Utility { Router::RouteConfigProviderManager& route_config_provider_manager, Config::ConfigProviderManager& scoped_routes_config_provider_manager, Tracing::HttpTracerManager& http_tracer_manager, - Filter::Http::FilterConfigProviderManager& filter_config_provider_manager); + FilterConfigProviderManager& filter_config_provider_manager); }; } // namespace HttpConnectionManager diff --git a/source/extensions/filters/network/kafka/mesh/BUILD b/source/extensions/filters/network/kafka/mesh/BUILD deleted file mode 100644 index e707476b50891..0000000000000 --- a/source/extensions/filters/network/kafka/mesh/BUILD +++ /dev/null @@ -1,64 +0,0 @@ -load( - "//bazel:envoy_build_system.bzl", - "envoy_cc_library", - "envoy_extension_package", -) - -licenses(["notice"]) # Apache 2 - -# Kafka-mesh network filter. - -envoy_extension_package() - -envoy_cc_library( - name = "filter_lib", - srcs = ["filter.cc"], - hdrs = [ - "filter.h", - ], - tags = ["skip_on_windows"], - deps = [ - ":abstract_command_lib", - ":request_processor_lib", - "//envoy/buffer:buffer_interface", - "//envoy/network:connection_interface", - "//envoy/network:filter_interface", - "//source/common/common:assert_lib", - "//source/common/common:minimal_logger_lib", - "//source/extensions/filters/network/kafka:kafka_request_codec_lib", - "//source/extensions/filters/network/kafka:kafka_response_codec_lib", - ], -) - -envoy_cc_library( - name = "request_processor_lib", - srcs = [ - "request_processor.cc", - ], - hdrs = [ - "request_processor.h", - ], - tags = ["skip_on_windows"], - deps = [ - ":abstract_command_lib", - "//source/common/common:minimal_logger_lib", - "//source/extensions/filters/network/kafka:kafka_request_codec_lib", - "//source/extensions/filters/network/kafka:kafka_request_parser_lib", - ], -) - -envoy_cc_library( - name = "abstract_command_lib", - srcs = [ - "abstract_command.cc", - ], - hdrs = [ - "abstract_command.h", - ], - tags = ["skip_on_windows"], - deps = [ - 
"//source/common/common:minimal_logger_lib", - "//source/extensions/filters/network/kafka:kafka_response_lib", - "//source/extensions/filters/network/kafka:tagged_fields_lib", - ], -) diff --git a/source/extensions/filters/network/kafka/mesh/filter.h b/source/extensions/filters/network/kafka/mesh/filter.h deleted file mode 100644 index 3fde92e6ca273..0000000000000 --- a/source/extensions/filters/network/kafka/mesh/filter.h +++ /dev/null @@ -1,83 +0,0 @@ -#pragma once - -#include "envoy/common/time.h" -#include "envoy/network/connection.h" -#include "envoy/network/filter.h" -#include "envoy/stats/scope.h" - -#include "source/common/common/logger.h" -#include "source/extensions/filters/network/kafka/external/requests.h" -#include "source/extensions/filters/network/kafka/mesh/abstract_command.h" -#include "source/extensions/filters/network/kafka/mesh/request_processor.h" -#include "source/extensions/filters/network/kafka/request_codec.h" - -namespace Envoy { -namespace Extensions { -namespace NetworkFilters { -namespace Kafka { -namespace Mesh { -/** - * Main entry point. - * Decoded request bytes are passed to processor, that calls us back with enriched request. - * Request then gets invoked with upstream Kafka facade, which will (in future) maintain - * thread-local list of (enriched) Kafka producers. Filter is going to maintain a list of - * in-flight-request so it can send responses when they finish. - * - * - * +----------------+ +-----------------------+ - * |RequestProcessor+----------------->AbstractInFlightRequest| - * +-------^--------+ +------^----------------+ - * | | - * | | - * +-------+-------+ | - * |KafkaMeshFilter+-------------------------+ - * +-------+-------+ - **/ -class KafkaMeshFilter : public Network::ReadFilter, - public Network::ConnectionCallbacks, - public AbstractRequestListener, - private Logger::Loggable { -public: - // Visible for testing. - KafkaMeshFilter(RequestDecoderSharedPtr request_decoder); - - // Non-trivial. 
See 'abandonAllInFlightRequests'. - ~KafkaMeshFilter() override; - - // Network::ReadFilter - Network::FilterStatus onNewConnection() override; - void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override; - Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override; - - // Network::ConnectionCallbacks - void onEvent(Network::ConnectionEvent event) override; - void onAboveWriteBufferHighWatermark() override; - void onBelowWriteBufferLowWatermark() override; - - // AbstractRequestListener - void onRequest(InFlightRequestSharedPtr request) override; - void onRequestReadyForAnswer() override; - - std::list& getRequestsInFlightForTest(); - -private: - // Helper method invoked when connection gets dropped. - // Request references are going to be stored in 2 places: this filter (request's origin) and in - // UpstreamKafkaClient instances (to match pure-Kafka confirmations to the requests). Because - // filter can be destroyed before confirmations from Kafka are received, we are just going to mark - // related requests as abandoned, so they do not attempt to reference this filter anymore. - // Impl note: this is similar to what Redis filter does. 
- void abandonAllInFlightRequests(); - - const RequestDecoderSharedPtr request_decoder_; - - Network::ReadFilterCallbacks* read_filter_callbacks_; - - std::list requests_in_flight_; -}; - -} // namespace Mesh -} // namespace Kafka -} // namespace NetworkFilters -} // namespace Extensions -} // namespace Envoy diff --git a/source/extensions/filters/network/kafka/mesh/request_processor.cc b/source/extensions/filters/network/kafka/mesh/request_processor.cc deleted file mode 100644 index d19c8f9edd055..0000000000000 --- a/source/extensions/filters/network/kafka/mesh/request_processor.cc +++ /dev/null @@ -1,31 +0,0 @@ -#include "source/extensions/filters/network/kafka/mesh/request_processor.h" - -#include "envoy/common/exception.h" - -namespace Envoy { -namespace Extensions { -namespace NetworkFilters { -namespace Kafka { -namespace Mesh { - -// Helper function. Throws a nice message. Filter will react by closing the connection. -static void throwOnUnsupportedRequest(const std::string& reason, const RequestHeader& header) { - throw EnvoyException(absl::StrCat(reason, " Kafka request (key=", header.api_key_, ", version=", - header.api_version_, ", cid=", header.correlation_id_)); -} - -void RequestProcessor::onMessage(AbstractRequestSharedPtr arg) { - // This will be replaced with switch on header's API key. - throwOnUnsupportedRequest("unsupported (bad client API invoked?)", arg->request_header_); -} - -// We got something that the parser could not handle. 
-void RequestProcessor::onFailedParse(RequestParseFailureSharedPtr arg) { - throwOnUnsupportedRequest("unknown", arg->request_header_); -} - -} // namespace Mesh -} // namespace Kafka -} // namespace NetworkFilters -} // namespace Extensions -} // namespace Envoy diff --git a/source/extensions/filters/network/kafka/mesh/request_processor.h b/source/extensions/filters/network/kafka/mesh/request_processor.h deleted file mode 100644 index ae89ca8ae47be..0000000000000 --- a/source/extensions/filters/network/kafka/mesh/request_processor.h +++ /dev/null @@ -1,30 +0,0 @@ -#pragma once - -#include "source/common/common/logger.h" -#include "source/extensions/filters/network/kafka/external/requests.h" -#include "source/extensions/filters/network/kafka/mesh/abstract_command.h" -#include "source/extensions/filters/network/kafka/request_codec.h" - -namespace Envoy { -namespace Extensions { -namespace NetworkFilters { -namespace Kafka { -namespace Mesh { - -/** - * Processes (enriches) incoming requests and passes it back to origin. 
- */ -class RequestProcessor : public RequestCallback, private Logger::Loggable { -public: - RequestProcessor() = default; - - // RequestCallback - void onMessage(AbstractRequestSharedPtr arg) override; - void onFailedParse(RequestParseFailureSharedPtr) override; -}; - -} // namespace Mesh -} // namespace Kafka -} // namespace NetworkFilters -} // namespace Extensions -} // namespace Envoy diff --git a/source/extensions/filters/network/ratelimit/config.cc b/source/extensions/filters/network/ratelimit/config.cc index fdf84987e9ff2..c665500cee0ee 100644 --- a/source/extensions/filters/network/ratelimit/config.cc +++ b/source/extensions/filters/network/ratelimit/config.cc @@ -28,14 +28,12 @@ Network::FilterFactoryCb RateLimitConfigFactory::createFilterFactoryFromProtoTyp ConfigSharedPtr filter_config(new Config(proto_config, context.scope(), context.runtime())); const std::chrono::milliseconds timeout = std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(proto_config, timeout, 20)); - - return [proto_config, &context, timeout, filter_config, - transport_version = Envoy::Config::Utility::getAndCheckTransportVersion( - proto_config.rate_limit_service())](Network::FilterManager& filter_manager) -> void { + Envoy::Config::Utility::checkTransportVersion(proto_config.rate_limit_service()); + return [proto_config, &context, timeout, + filter_config](Network::FilterManager& filter_manager) -> void { filter_manager.addReadFilter(std::make_shared( filter_config, Filters::Common::RateLimit::rateLimitClient( - context, proto_config.rate_limit_service().grpc_service(), timeout, - transport_version))); + context, proto_config.rate_limit_service().grpc_service(), timeout))); }; } diff --git a/source/extensions/filters/network/rbac/rbac_filter.cc b/source/extensions/filters/network/rbac/rbac_filter.cc index 705ecf0d417f2..9bd6b6723c244 100644 --- a/source/extensions/filters/network/rbac/rbac_filter.cc +++ b/source/extensions/filters/network/rbac/rbac_filter.cc @@ -28,7 +28,7 @@ 
Network::FilterStatus RoleBasedAccessControlFilter::onData(Buffer::Instance&, bo "checking connection: requestedServerName: {}, sourceIP: {}, directRemoteIP: {}," "remoteIP: {}, localAddress: {}, ssl: {}, dynamicMetadata: {}", callbacks_->connection().requestedServerName(), - callbacks_->connection().addressProvider().remoteAddress()->asString(), + callbacks_->connection().connectionInfoProvider().remoteAddress()->asString(), callbacks_->connection() .streamInfo() .downstreamAddressProvider() diff --git a/source/extensions/filters/network/redis_proxy/config.cc b/source/extensions/filters/network/redis_proxy/config.cc index 4a6790b7569de..b356418325799 100644 --- a/source/extensions/filters/network/redis_proxy/config.cc +++ b/source/extensions/filters/network/redis_proxy/config.cc @@ -47,20 +47,9 @@ Network::FilterFactoryCb RedisProxyFilterConfigFactory::createFilterFactoryFromP envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::PrefixRoutes prefix_routes( proto_config.prefix_routes()); - // Set the catch-all route from the deprecated cluster and settings parameters. - if (prefix_routes.hidden_envoy_deprecated_catch_all_cluster().empty() && - prefix_routes.routes_size() == 0 && !prefix_routes.has_catch_all_route()) { - if (proto_config.hidden_envoy_deprecated_cluster().empty()) { - throw EnvoyException("cannot configure a redis-proxy without any upstream"); - } - - prefix_routes.mutable_catch_all_route()->set_cluster( - proto_config.hidden_envoy_deprecated_cluster()); - } else if (!prefix_routes.hidden_envoy_deprecated_catch_all_cluster().empty() && - !prefix_routes.has_catch_all_route()) { - // Set the catch-all route from the deprecated catch-all cluster. - prefix_routes.mutable_catch_all_route()->set_cluster( - prefix_routes.hidden_envoy_deprecated_catch_all_cluster()); + // Set the catch-all route from the settings parameters. 
+ if (prefix_routes.routes_size() == 0 && !prefix_routes.has_catch_all_route()) { + throw EnvoyException("cannot configure a redis-proxy without any upstream"); } absl::flat_hash_set unique_clusters; diff --git a/source/extensions/filters/network/sni_dynamic_forward_proxy/config.cc b/source/extensions/filters/network/sni_dynamic_forward_proxy/config.cc index 54a7ef5a05a59..dedff7689748e 100644 --- a/source/extensions/filters/network/sni_dynamic_forward_proxy/config.cc +++ b/source/extensions/filters/network/sni_dynamic_forward_proxy/config.cc @@ -19,8 +19,8 @@ SniDynamicForwardProxyNetworkFilterConfigFactory::createFilterFactoryFromProtoTy const FilterConfig& proto_config, Server::Configuration::FactoryContext& context) { Extensions::Common::DynamicForwardProxy::DnsCacheManagerFactoryImpl cache_manager_factory( - context.singletonManager(), context.dispatcher(), context.threadLocal(), - context.api().randomGenerator(), context.runtime(), context.scope()); + context.singletonManager(), context.dispatcher(), context.threadLocal(), context.api(), + context.runtime(), context.scope(), context.messageValidationVisitor()); ProxyFilterConfigSharedPtr filter_config(std::make_shared( proto_config, cache_manager_factory, context.clusterManager())); diff --git a/source/extensions/filters/network/tcp_proxy/config.cc b/source/extensions/filters/network/tcp_proxy/config.cc index ccdc6e7a69da4..8de0ac9224a70 100644 --- a/source/extensions/filters/network/tcp_proxy/config.cc +++ b/source/extensions/filters/network/tcp_proxy/config.cc @@ -17,10 +17,6 @@ Network::FilterFactoryCb ConfigFactory::createFilterFactoryFromProtoTyped( ASSERT(!proto_config.stat_prefix().empty()); auto _ = Envoy::Router::HeaderParser::configure(proto_config.tunneling_config().headers_to_add()); - if (proto_config.has_hidden_envoy_deprecated_deprecated_v1()) { - ASSERT(proto_config.hidden_envoy_deprecated_deprecated_v1().routes_size() > 0); - } - Envoy::TcpProxy::ConfigSharedPtr filter_config( 
std::make_shared(proto_config, context)); return [filter_config, &context](Network::FilterManager& filter_manager) -> void { diff --git a/source/extensions/filters/network/thrift_proxy/BUILD b/source/extensions/filters/network/thrift_proxy/BUILD index be42dd471b500..8b6e66a083881 100644 --- a/source/extensions/filters/network/thrift_proxy/BUILD +++ b/source/extensions/filters/network/thrift_proxy/BUILD @@ -47,6 +47,7 @@ envoy_cc_extension( ":framed_transport_lib", ":header_transport_lib", ":protocol_interface", + ":protocol_options_config_lib", ":twitter_protocol_lib", ":unframed_transport_lib", "//envoy/registry", @@ -265,6 +266,15 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "protocol_options_config_lib", + hdrs = ["protocol_options_config.h"], + deps = [ + ":thrift_lib", + "//envoy/upstream:upstream_interface", + ], +) + envoy_cc_library( name = "thrift_lib", hdrs = ["thrift.h"], diff --git a/source/extensions/filters/network/thrift_proxy/config.cc b/source/extensions/filters/network/thrift_proxy/config.cc index bd0cf021f079e..ae272ed1b480f 100644 --- a/source/extensions/filters/network/thrift_proxy/config.cc +++ b/source/extensions/filters/network/thrift_proxy/config.cc @@ -158,11 +158,7 @@ void ConfigImpl::processFilter( ENVOY_LOG(debug, " name: {}", string_name); ENVOY_LOG(debug, " config: {}", MessageUtil::getJsonStringFromMessageOrError( - proto_config.has_typed_config() - ? 
static_cast(proto_config.typed_config()) - : static_cast( - proto_config.hidden_envoy_deprecated_config()), - true)); + static_cast(proto_config.typed_config()), true)); auto& factory = Envoy::Config::Utility::getAndCheckFactory( proto_config); diff --git a/source/extensions/filters/network/thrift_proxy/config.h b/source/extensions/filters/network/thrift_proxy/config.h index 8cc5d722071f4..1eea3cc9e6c8f 100644 --- a/source/extensions/filters/network/thrift_proxy/config.h +++ b/source/extensions/filters/network/thrift_proxy/config.h @@ -9,6 +9,7 @@ #include "source/extensions/filters/network/common/factory_base.h" #include "source/extensions/filters/network/thrift_proxy/conn_manager.h" #include "source/extensions/filters/network/thrift_proxy/filters/filter.h" +#include "source/extensions/filters/network/thrift_proxy/protocol_options_config.h" #include "source/extensions/filters/network/thrift_proxy/router/router_impl.h" #include "source/extensions/filters/network/well_known_names.h" diff --git a/source/extensions/filters/network/thrift_proxy/conn_manager.cc b/source/extensions/filters/network/thrift_proxy/conn_manager.cc index c3de018f2dfda..1e100247e59cc 100644 --- a/source/extensions/filters/network/thrift_proxy/conn_manager.cc +++ b/source/extensions/filters/network/thrift_proxy/conn_manager.cc @@ -319,46 +319,45 @@ void ConnectionManager::ActiveRpcDecoderFilter::continueDecoding() { FilterStatus ConnectionManager::ActiveRpc::applyDecoderFilters(ActiveRpcDecoderFilter* filter) { ASSERT(filter_action_ != nullptr); - if (!local_response_sent_) { - if (upgrade_handler_) { - // Divert events to the current protocol upgrade handler. 
- const FilterStatus status = filter_action_(upgrade_handler_.get()); - filter_context_.reset(); - return status; - } + if (local_response_sent_) { + filter_action_ = nullptr; + filter_context_.reset(); + return FilterStatus::Continue; + } - std::list::iterator entry; - if (!filter) { - entry = decoder_filters_.begin(); - } else { - entry = std::next(filter->entry()); - } + if (upgrade_handler_) { + // Divert events to the current protocol upgrade handler. + const FilterStatus status = filter_action_(upgrade_handler_.get()); + filter_context_.reset(); + return status; + } - for (; entry != decoder_filters_.end(); entry++) { - const FilterStatus status = filter_action_((*entry)->handle_.get()); - if (local_response_sent_) { - // The filter called sendLocalReply but _did not_ close the connection. - // We return FilterStatus::Continue irrespective of the current result, - // which is fine because subsequent calls to this method will skip - // filters anyway. - // - // Note: we need to return FilterStatus::Continue here, in order for decoding - // to proceed. This is important because as noted above, the connection remains - // open so we need to consume the remaining bytes. - break; - } + std::list::iterator entry = + !filter ? decoder_filters_.begin() : std::next(filter->entry()); + for (; entry != decoder_filters_.end(); entry++) { + const FilterStatus status = filter_action_((*entry)->handle_.get()); + if (local_response_sent_) { + // The filter called sendLocalReply but _did not_ close the connection. + // We return FilterStatus::Continue irrespective of the current result, + // which is fine because subsequent calls to this method will skip + // filters anyway. + // + // Note: we need to return FilterStatus::Continue here, in order for decoding + // to proceed. This is important because as noted above, the connection remains + // open so we need to consume the remaining bytes. 
+ break; + } - if (status != FilterStatus::Continue) { - // If we got FilterStatus::StopIteration and a local reply happened but - // local_response_sent_ was not set, the connection was closed. - // - // In this case, either resetAllRpcs() gets called via onEvent(LocalClose) or - // dispatch() stops the processing. - // - // In other words, after a local reply closes the connection and StopIteration - // is returned we are done. - return status; - } + if (status != FilterStatus::Continue) { + // If we got FilterStatus::StopIteration and a local reply happened but + // local_response_sent_ was not set, the connection was closed. + // + // In this case, either resetAllRpcs() gets called via onEvent(LocalClose) or + // dispatch() stops the processing. + // + // In other words, after a local reply closes the connection and StopIteration + // is returned we are done. + return status; } } diff --git a/source/extensions/filters/network/thrift_proxy/conn_manager.h b/source/extensions/filters/network/thrift_proxy/conn_manager.h index f4e19152d0aa1..064d29b050607 100644 --- a/source/extensions/filters/network/thrift_proxy/conn_manager.h +++ b/source/extensions/filters/network/thrift_proxy/conn_manager.h @@ -42,17 +42,6 @@ class Config { virtual uint64_t maxRequestsPerConnection() const PURE; }; -/** - * Extends Upstream::ProtocolOptionsConfig with Thrift-specific cluster options. - */ -class ProtocolOptionsConfig : public Upstream::ProtocolOptionsConfig { -public: - ~ProtocolOptionsConfig() override = default; - - virtual TransportType transport(TransportType downstream_transport) const PURE; - virtual ProtocolType protocol(ProtocolType downstream_protocol) const PURE; -}; - /** * ConnectionManager is a Network::Filter that will perform Thrift request handling on a connection. 
*/ @@ -165,7 +154,7 @@ class ConnectionManager : public Network::ReadFilter, parent_.stats_.request_time_ms_, parent_.time_source_)), stream_id_(parent_.random_generator_.random()), stream_info_(parent_.time_source_, - parent_.read_callbacks_->connection().addressProviderSharedPtr()), + parent_.read_callbacks_->connection().connectionInfoProviderSharedPtr()), local_response_sent_{false}, pending_transport_end_{false} { parent_.stats_.request_active_.inc(); } diff --git a/source/extensions/filters/network/thrift_proxy/filters/ratelimit/config.cc b/source/extensions/filters/network/thrift_proxy/filters/ratelimit/config.cc index 82217079c8e9a..c1baa8ba3117d 100644 --- a/source/extensions/filters/network/thrift_proxy/filters/ratelimit/config.cc +++ b/source/extensions/filters/network/thrift_proxy/filters/ratelimit/config.cc @@ -30,14 +30,12 @@ RateLimitFilterConfig::createFilterFactoryFromProtoTyped( const std::chrono::milliseconds timeout = std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(proto_config, timeout, 20)); - return [proto_config, &context, timeout, config, - transport_version = Envoy::Config::Utility::getAndCheckTransportVersion( - proto_config.rate_limit_service())]( - ThriftProxy::ThriftFilters::FilterChainFactoryCallbacks& callbacks) -> void { + Envoy::Config::Utility::checkTransportVersion(proto_config.rate_limit_service()); + return [proto_config, &context, timeout, + config](ThriftProxy::ThriftFilters::FilterChainFactoryCallbacks& callbacks) -> void { callbacks.addDecoderFilter(std::make_shared( config, Filters::Common::RateLimit::rateLimitClient( - context, proto_config.rate_limit_service().grpc_service(), timeout, - transport_version))); + context, proto_config.rate_limit_service().grpc_service(), timeout))); }; } diff --git a/source/extensions/filters/network/thrift_proxy/metadata.h b/source/extensions/filters/network/thrift_proxy/metadata.h index 560952003f92f..08b91a1c4f040 100644 --- 
a/source/extensions/filters/network/thrift_proxy/metadata.h +++ b/source/extensions/filters/network/thrift_proxy/metadata.h @@ -30,6 +30,71 @@ class MessageMetadata { public: MessageMetadata() = default; + std::shared_ptr clone() const { + auto copy = std::make_shared(); + + if (hasFrameSize()) { + copy->setFrameSize(frameSize()); + } + + if (hasProtocol()) { + copy->setProtocol(protocol()); + } + + if (hasMethodName()) { + copy->setMethodName(methodName()); + } + + if (hasSequenceId()) { + copy->setSequenceId(sequenceId()); + } + + if (hasMessageType()) { + copy->setMessageType(messageType()); + } + + Http::HeaderMapImpl::copyFrom(copy->headers(), headers()); + copy->mutableSpans().assign(spans().begin(), spans().end()); + + if (hasAppException()) { + copy->setAppException(appExceptionType(), appExceptionMessage()); + } + + copy->setProtocolUpgradeMessage(isProtocolUpgradeMessage()); + + auto trace_id = traceId(); + if (trace_id.has_value()) { + copy->setTraceId(trace_id.value()); + } + + auto trace_id_high = traceIdHigh(); + if (trace_id_high.has_value()) { + copy->setTraceIdHigh(trace_id_high.value()); + } + + auto span_id = spanId(); + if (span_id.has_value()) { + copy->setSpanId(span_id.value()); + } + + auto parent_span_id = parentSpanId(); + if (parent_span_id.has_value()) { + copy->setParentSpanId(parent_span_id.value()); + } + + auto flags_opt = flags(); + if (flags_opt.has_value()) { + copy->setFlags(flags_opt.value()); + } + + auto sampled_opt = sampled(); + if (sampled_opt.has_value()) { + copy->setSampled(sampled_opt.value()); + } + + return copy; + } + bool hasFrameSize() const { return frame_size_.has_value(); } uint32_t frameSize() const { return frame_size_.value(); } void setFrameSize(uint32_t size) { frame_size_ = size; } diff --git a/source/extensions/filters/network/thrift_proxy/protocol_options_config.h b/source/extensions/filters/network/thrift_proxy/protocol_options_config.h new file mode 100644 index 0000000000000..e6e1b1952c4ab --- 
/dev/null +++ b/source/extensions/filters/network/thrift_proxy/protocol_options_config.h @@ -0,0 +1,26 @@ +#pragma once + +#include "envoy/upstream/upstream.h" + +#include "source/extensions/filters/network/thrift_proxy/thrift.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace ThriftProxy { + +/** + * Extends Upstream::ProtocolOptionsConfig with Thrift-specific cluster options. + */ +class ProtocolOptionsConfig : public Upstream::ProtocolOptionsConfig { +public: + ~ProtocolOptionsConfig() override = default; + + virtual TransportType transport(TransportType downstream_transport) const PURE; + virtual ProtocolType protocol(ProtocolType downstream_protocol) const PURE; +}; + +} // namespace ThriftProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/thrift_proxy/router/BUILD b/source/extensions/filters/network/thrift_proxy/router/BUILD index 7eb78e9adb33d..048ed66baa952 100644 --- a/source/extensions/filters/network/thrift_proxy/router/BUILD +++ b/source/extensions/filters/network/thrift_proxy/router/BUILD @@ -30,8 +30,12 @@ envoy_cc_library( "//envoy/router:router_interface", "//envoy/tcp:conn_pool_interface", "//source/common/buffer:buffer_lib", + "//source/common/common:logger_lib", + "//source/extensions/filters/network:well_known_names", + "//source/extensions/filters/network/thrift_proxy:app_exception_lib", "//source/extensions/filters/network/thrift_proxy:metadata_lib", "//source/extensions/filters/network/thrift_proxy:protocol_converter_lib", + "//source/extensions/filters/network/thrift_proxy:protocol_options_config_lib", ], ) @@ -52,6 +56,7 @@ envoy_cc_library( deps = [ ":router_interface", "//envoy/tcp:conn_pool_interface", + "//source/common/common:logger_lib", "//source/extensions/filters/network/thrift_proxy:app_exception_lib", "//source/extensions/filters/network/thrift_proxy:conn_manager_lib", 
"//source/extensions/filters/network/thrift_proxy:thrift_object_interface", @@ -60,6 +65,29 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "shadow_writer_lib", + srcs = ["shadow_writer_impl.cc"], + hdrs = ["shadow_writer_impl.h"], + deps = [ + ":router_interface", + ":upstream_request_lib", + "//envoy/tcp:conn_pool_interface", + "//envoy/upstream:cluster_manager_interface", + "//envoy/upstream:load_balancer_interface", + "//envoy/upstream:thread_local_cluster_interface", + "//source/common/common:linked_object", + "//source/common/upstream:load_balancer_lib", + "//source/extensions/filters/network:well_known_names", + "//source/extensions/filters/network/thrift_proxy:app_exception_lib", + "//source/extensions/filters/network/thrift_proxy:conn_manager_lib", + "//source/extensions/filters/network/thrift_proxy:protocol_converter_lib", + "//source/extensions/filters/network/thrift_proxy:protocol_interface", + "//source/extensions/filters/network/thrift_proxy:thrift_object_interface", + "//source/extensions/filters/network/thrift_proxy:transport_interface", + ], +) + envoy_cc_library( name = "router_lib", srcs = ["router_impl.cc"], @@ -67,16 +95,15 @@ envoy_cc_library( deps = [ ":router_interface", ":router_ratelimit_lib", + ":shadow_writer_lib", ":upstream_request_lib", "//envoy/tcp:conn_pool_interface", "//envoy/upstream:cluster_manager_interface", "//envoy/upstream:load_balancer_interface", "//envoy/upstream:thread_local_cluster_interface", - "//source/common/common:logger_lib", "//source/common/http:header_utility_lib", "//source/common/router:metadatamatchcriteria_lib", "//source/common/upstream:load_balancer_lib", - "//source/extensions/filters/network:well_known_names", "//source/extensions/filters/network/thrift_proxy:app_exception_lib", "//source/extensions/filters/network/thrift_proxy:conn_manager_lib", "//source/extensions/filters/network/thrift_proxy:protocol_converter_lib", diff --git a/source/extensions/filters/network/thrift_proxy/router/config.cc 
b/source/extensions/filters/network/thrift_proxy/router/config.cc index ef94242c89b8d..3e651b2886b7f 100644 --- a/source/extensions/filters/network/thrift_proxy/router/config.cc +++ b/source/extensions/filters/network/thrift_proxy/router/config.cc @@ -5,6 +5,7 @@ #include "envoy/registry/registry.h" #include "source/extensions/filters/network/thrift_proxy/router/router_impl.h" +#include "source/extensions/filters/network/thrift_proxy/router/shadow_writer_impl.h" namespace Envoy { namespace Extensions { @@ -17,9 +18,14 @@ ThriftFilters::FilterFactoryCb RouterFilterConfig::createFilterFactoryFromProtoT const std::string& stat_prefix, Server::Configuration::FactoryContext& context) { UNREFERENCED_PARAMETER(proto_config); - return [&context, stat_prefix](ThriftFilters::FilterChainFactoryCallbacks& callbacks) -> void { - callbacks.addDecoderFilter( - std::make_shared(context.clusterManager(), stat_prefix, context.scope())); + auto shadow_writer = + std::make_shared(context.clusterManager(), stat_prefix, context.scope(), + context.dispatcher(), context.threadLocal()); + + return [&context, stat_prefix, + shadow_writer](ThriftFilters::FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addDecoderFilter(std::make_shared( + context.clusterManager(), stat_prefix, context.scope(), context.runtime(), *shadow_writer)); }; } diff --git a/source/extensions/filters/network/thrift_proxy/router/router.h b/source/extensions/filters/network/thrift_proxy/router/router.h index 603a08c4c0159..dfda11ac34407 100644 --- a/source/extensions/filters/network/thrift_proxy/router/router.h +++ b/source/extensions/filters/network/thrift_proxy/router/router.h @@ -2,14 +2,19 @@ #include #include +#include #include "envoy/buffer/buffer.h" #include "envoy/router/router.h" #include "envoy/tcp/conn_pool.h" #include "source/common/buffer/buffer_impl.h" +#include "source/common/common/logger.h" +#include "source/extensions/filters/network/thrift_proxy/app_exception_impl.h" #include 
"source/extensions/filters/network/thrift_proxy/metadata.h" #include "source/extensions/filters/network/thrift_proxy/protocol_converter.h" +#include "source/extensions/filters/network/thrift_proxy/protocol_options_config.h" +#include "source/extensions/filters/network/well_known_names.h" namespace Envoy { namespace Extensions { @@ -18,6 +23,7 @@ namespace ThriftProxy { namespace Router { class RateLimitPolicy; +class RequestMirrorPolicy; /** * RouteEntry is an individual resolved route entry. @@ -51,6 +57,13 @@ class RouteEntry { * @return const Http::LowerCaseString& the header used to determine the cluster. */ virtual const Http::LowerCaseString& clusterHeader() const PURE; + + /** + * @return const std::vector& the mirror policies associated with this route, + * if any. + */ + virtual const std::vector>& + requestMirrorPolicies() const PURE; }; /** @@ -101,7 +114,7 @@ struct RouterStats { /** * This interface is used by an upstream request to communicate its state. */ -class RequestOwner : public ProtocolConverter { +class RequestOwner : public ProtocolConverter, public Logger::Loggable { public: RequestOwner(Upstream::ClusterManager& cluster_manager, const std::string& stat_prefix, Stats::Scope& scope) @@ -181,6 +194,11 @@ class RequestOwner : public ProtocolConverter { */ Upstream::ClusterManager& clusterManager() { return cluster_manager_; } + /** + * @return Upstream::Cluster& the upstream cluster associated with the request. + */ + const Upstream::ClusterInfo& cluster() const { return *cluster_; } + /** * Common stats. 
*/ @@ -269,6 +287,85 @@ class RequestOwner : public ProtocolConverter { recordClusterScopeHistogram(cluster, {upstream_rq_time_}, unit, value); } +protected: + struct UpstreamRequestInfo { + bool passthrough_supported; + TransportType transport; + ProtocolType protocol; + absl::optional conn_pool_data; + }; + + struct PrepareUpstreamRequestResult { + absl::optional exception; + absl::optional upstream_request_info; + }; + + PrepareUpstreamRequestResult prepareUpstreamRequest(const std::string& cluster_name, + MessageMetadataSharedPtr& metadata, + TransportType transport, + ProtocolType protocol, + Upstream::LoadBalancerContext* lb_context) { + Upstream::ThreadLocalCluster* cluster = clusterManager().getThreadLocalCluster(cluster_name); + if (!cluster) { + ENVOY_LOG(debug, "unknown cluster '{}'", cluster_name); + stats().unknown_cluster_.inc(); + return {AppException(AppExceptionType::InternalError, + fmt::format("unknown cluster '{}'", cluster_name)), + absl::nullopt}; + } + + cluster_ = cluster->info(); + ENVOY_LOG(debug, "cluster '{}' match for method '{}'", cluster_name, metadata->methodName()); + + switch (metadata->messageType()) { + case MessageType::Call: + incRequestCall(*cluster_); + break; + + case MessageType::Oneway: + incRequestOneWay(*cluster_); + break; + + default: + incRequestInvalid(*cluster_); + break; + } + + if (cluster_->maintenanceMode()) { + stats().upstream_rq_maintenance_mode_.inc(); + return {AppException(AppExceptionType::InternalError, + fmt::format("maintenance mode for cluster '{}'", cluster_name)), + absl::nullopt}; + } + + const std::shared_ptr options = + cluster_->extensionProtocolOptionsTyped( + NetworkFilterNames::get().ThriftProxy); + + const TransportType final_transport = options ? options->transport(transport) : transport; + ASSERT(final_transport != TransportType::Auto); + + const ProtocolType final_protocol = options ? 
options->protocol(protocol) : protocol; + ASSERT(final_protocol != ProtocolType::Auto); + + auto conn_pool_data = cluster->tcpConnPool(Upstream::ResourcePriority::Default, lb_context); + if (!conn_pool_data) { + stats().no_healthy_upstream_.inc(); + return {AppException(AppExceptionType::InternalError, + fmt::format("no healthy upstream for '{}'", cluster_name)), + absl::nullopt}; + } + + const auto passthrough_supported = + transport == TransportType::Framed && final_transport == TransportType::Framed && + protocol == final_protocol && final_protocol != ProtocolType::Twitter; + UpstreamRequestInfo result = {passthrough_supported, final_transport, final_protocol, + conn_pool_data}; + return {absl::nullopt, result}; + } + + Upstream::ClusterInfoConstSharedPtr cluster_; + private: void incClusterScopeCounter(const Upstream::ClusterInfo& cluster, const Stats::StatNameVec& names) const { @@ -308,6 +405,82 @@ class RequestOwner : public ProtocolConverter { const Stats::StatName upstream_resp_size_; }; +/** + * RequestMirrorPolicy is an individual mirroring rule for a route entry. + */ +class RequestMirrorPolicy { +public: + virtual ~RequestMirrorPolicy() = default; + + /** + * @return const std::string& the upstream cluster that should be used for the mirrored request. + */ + virtual const std::string& clusterName() const PURE; + + /** + * @return bool whether this policy is currently enabled. + */ + virtual bool enabled(Runtime::Loader& runtime) const PURE; +}; + +/** + * ShadowRouterHandle is used to write a request or release a connection early if needed. + */ +class ShadowRouterHandle { +public: + virtual ~ShadowRouterHandle() = default; + + /** + * Called after the Router is destroyed. + */ + virtual void onRouterDestroy() PURE; + + /** + * Checks if the request is currently waiting for an upstream connection to become available. + */ + virtual bool waitingForConnection() const PURE; + + /** + * @return RequestOwner& the interface associated with this ShadowRouter. 
+ */ + virtual RequestOwner& requestOwner() PURE; +}; + +/** + * ShadowWriter is used for submitting requests and ignoring the response. + */ +class ShadowWriter { +public: + virtual ~ShadowWriter() = default; + + /** + * @return Upstream::ClusterManager& the cluster manager. + */ + virtual Upstream::ClusterManager& clusterManager() PURE; + + /** + * @return std::string& the stat prefix used by the router. + */ + virtual const std::string& statPrefix() const PURE; + + /** + * @return Stats::Scope& the Scope used by the router. + */ + virtual Stats::Scope& scope() PURE; + + /** + * @return Dispatcher& the dispatcher. + */ + virtual Event::Dispatcher& dispatcher() PURE; + + /** + * Starts the shadow request by requesting an upstream connection. + */ + virtual absl::optional> + submit(const std::string& cluster_name, MessageMetadataSharedPtr metadata, + TransportType original_transport, ProtocolType original_protocol) PURE; +}; + } // namespace Router } // namespace ThriftProxy } // namespace NetworkFilters diff --git a/source/extensions/filters/network/thrift_proxy/router/router_impl.cc b/source/extensions/filters/network/thrift_proxy/router/router_impl.cc index f0a5ef11bb1af..5cdad0c0b468d 100644 --- a/source/extensions/filters/network/thrift_proxy/router/router_impl.cc +++ b/source/extensions/filters/network/thrift_proxy/router/router_impl.cc @@ -9,7 +9,6 @@ #include "source/common/common/utility.h" #include "source/common/router/metadatamatchcriteria_impl.h" #include "source/extensions/filters/network/thrift_proxy/app_exception_impl.h" -#include "source/extensions/filters/network/well_known_names.h" #include "absl/strings/match.h" @@ -25,7 +24,8 @@ RouteEntryImplBase::RouteEntryImplBase( config_headers_(Http::HeaderUtility::buildHeaderDataVector(route.match().headers())), rate_limit_policy_(route.route().rate_limits()), strip_service_name_(route.route().strip_service_name()), - cluster_header_(route.route().cluster_header()) { + 
cluster_header_(route.route().cluster_header()), + mirror_policies_(buildMirrorPolicies(route.route())) { if (route.route().has_metadata_match()) { const auto filter_it = route.route().metadata_match().filter_metadata().find( Envoy::Config::MetadataFilters::get().ENVOY_LB); @@ -48,6 +48,21 @@ RouteEntryImplBase::RouteEntryImplBase( } } +std::vector> RouteEntryImplBase::buildMirrorPolicies( + const envoy::extensions::filters::network::thrift_proxy::v3::RouteAction& route) { + std::vector> policies{}; + + const auto& proto_policies = route.request_mirror_policies(); + policies.reserve(proto_policies.size()); + for (const auto& policy : proto_policies) { + policies.push_back(std::make_shared( + policy.cluster(), policy.runtime_fraction().runtime_key(), + policy.runtime_fraction().default_value())); + } + + return policies; +} + const std::string& RouteEntryImplBase::clusterName() const { return cluster_name_; } const RouteEntry* RouteEntryImplBase::routeEntry() const { return this; } @@ -188,10 +203,17 @@ void Router::onDestroy() { upstream_request_->resetStream(); cleanup(); } + + for (auto& shadow_router : shadow_routers_) { + shadow_router.get().onRouterDestroy(); + } + + shadow_routers_.clear(); } void Router::setDecoderFilterCallbacks(ThriftFilters::DecoderFilterCallbacks& callbacks) { callbacks_ = &callbacks; + upstream_response_callbacks_ = std::make_unique(callbacks_); // TODO(zuercher): handle buffer limits } @@ -225,205 +247,221 @@ FilterStatus Router::messageBegin(MessageMetadataSharedPtr metadata) { route_entry_ = route_->routeEntry(); const std::string& cluster_name = route_entry_->clusterName(); - Upstream::ThreadLocalCluster* cluster = clusterManager().getThreadLocalCluster(cluster_name); - if (!cluster) { - ENVOY_STREAM_LOG(debug, "unknown cluster '{}'", *callbacks_, cluster_name); - stats().unknown_cluster_.inc(); - callbacks_->sendLocalReply(AppException(AppExceptionType::InternalError, - fmt::format("unknown cluster '{}'", cluster_name)), - true); + 
auto prepare_result = + prepareUpstreamRequest(cluster_name, metadata, callbacks_->downstreamTransportType(), + callbacks_->downstreamProtocolType(), this); + if (prepare_result.exception.has_value()) { + callbacks_->sendLocalReply(prepare_result.exception.value(), true); return FilterStatus::StopIteration; } - cluster_ = cluster->info(); - ENVOY_STREAM_LOG(debug, "cluster '{}' match for method '{}'", *callbacks_, cluster_name, - metadata->methodName()); - switch (metadata->messageType()) { - case MessageType::Call: - incRequestCall(*cluster_); - break; + ENVOY_STREAM_LOG(debug, "router decoding request", *callbacks_); - case MessageType::Oneway: - incRequestOneWay(*cluster_); - break; + if (route_entry_->stripServiceName()) { + const auto& method = metadata->methodName(); + const auto pos = method.find(':'); + if (pos != std::string::npos) { + metadata->setMethodName(method.substr(pos + 1)); + } + } - default: - incRequestInvalid(*cluster_); - break; + auto& upstream_req_info = prepare_result.upstream_request_info.value(); + passthrough_supported_ = upstream_req_info.passthrough_supported; + + // Prepare connections for shadow routers, if there are mirror policies configured and currently + // enabled. 
+ const auto& policies = route_entry_->requestMirrorPolicies(); + if (!policies.empty()) { + for (const auto& policy : policies) { + if (policy->enabled(runtime_)) { + auto shadow_router = + shadow_writer_.submit(policy->clusterName(), metadata, upstream_req_info.transport, + upstream_req_info.protocol); + if (shadow_router.has_value()) { + shadow_routers_.push_back(shadow_router.value()); + } + } + } } - if (cluster_->maintenanceMode()) { - stats().upstream_rq_maintenance_mode_.inc(); - callbacks_->sendLocalReply( - AppException(AppExceptionType::InternalError, - fmt::format("maintenance mode for cluster '{}'", cluster_name)), - true); - return FilterStatus::StopIteration; + upstream_request_ = + std::make_unique(*this, *upstream_req_info.conn_pool_data, metadata, + upstream_req_info.transport, upstream_req_info.protocol); + return upstream_request_->start(); +} + +FilterStatus Router::messageEnd() { + ProtocolConverter::messageEnd(); + const auto encode_size = upstream_request_->encodeAndWrite(upstream_request_buffer_); + addSize(encode_size); + recordUpstreamRequestSize(*cluster_, request_size_); + + // Dispatch shadow requests, if any. + // Note: if connections aren't ready, the write will happen when appropriate. + for (auto& shadow_router : shadow_routers_) { + auto& router = shadow_router.get(); + router.requestOwner().messageEnd(); } - const std::shared_ptr options = - cluster_->extensionProtocolOptionsTyped( - NetworkFilterNames::get().ThriftProxy); + return FilterStatus::Continue; +} - const TransportType transport = options - ? options->transport(callbacks_->downstreamTransportType()) - : callbacks_->downstreamTransportType(); - ASSERT(transport != TransportType::Auto); +FilterStatus Router::passthroughData(Buffer::Instance& data) { + for (auto& shadow_router : shadow_routers_) { + Buffer::OwnedImpl shadow_data; + shadow_data.add(data); + shadow_router.get().requestOwner().passthroughData(shadow_data); + } - const ProtocolType protocol = options ? 
options->protocol(callbacks_->downstreamProtocolType()) - : callbacks_->downstreamProtocolType(); - ASSERT(protocol != ProtocolType::Auto); + return ProtocolConverter::passthroughData(data); +} - if (callbacks_->downstreamTransportType() == TransportType::Framed && - transport == TransportType::Framed && callbacks_->downstreamProtocolType() == protocol && - protocol != ProtocolType::Twitter) { - passthrough_supported_ = true; +FilterStatus Router::structBegin(absl::string_view name) { + for (auto& shadow_router : shadow_routers_) { + shadow_router.get().requestOwner().structBegin(name); } - auto conn_pool_data = cluster->tcpConnPool(Upstream::ResourcePriority::Default, this); - if (!conn_pool_data) { - stats().no_healthy_upstream_.inc(); - callbacks_->sendLocalReply( - AppException(AppExceptionType::InternalError, - fmt::format("no healthy upstream for '{}'", cluster_name)), - true); - return FilterStatus::StopIteration; + return ProtocolConverter::structBegin(name); +} + +FilterStatus Router::structEnd() { + for (auto& shadow_router : shadow_routers_) { + shadow_router.get().requestOwner().structEnd(); } - ENVOY_STREAM_LOG(debug, "router decoding request", *callbacks_); + return ProtocolConverter::structEnd(); +} - if (route_entry_->stripServiceName()) { - const auto& method = metadata->methodName(); - const auto pos = method.find(':'); - if (pos != std::string::npos) { - metadata->setMethodName(method.substr(pos + 1)); - } +FilterStatus Router::fieldBegin(absl::string_view name, FieldType& field_type, int16_t& field_id) { + for (auto& shadow_router : shadow_routers_) { + shadow_router.get().requestOwner().fieldBegin(name, field_type, field_id); } - upstream_request_ = - std::make_unique(*this, *conn_pool_data, metadata, transport, protocol); - return upstream_request_->start(); + return ProtocolConverter::fieldBegin(name, field_type, field_id); } -FilterStatus Router::messageEnd() { - ProtocolConverter::messageEnd(); +FilterStatus Router::fieldEnd() { + for 
(auto& shadow_router : shadow_routers_) { + shadow_router.get().requestOwner().fieldEnd(); + } - Buffer::OwnedImpl transport_buffer; + return ProtocolConverter::fieldEnd(); +} - upstream_request_->metadata_->setProtocol(upstream_request_->protocol_->type()); +FilterStatus Router::boolValue(bool& value) { + for (auto& shadow_router : shadow_routers_) { + shadow_router.get().requestOwner().boolValue(value); + } - upstream_request_->transport_->encodeFrame(transport_buffer, *upstream_request_->metadata_, - upstream_request_buffer_); + return ProtocolConverter::boolValue(value); +} - request_size_ += transport_buffer.length(); - recordUpstreamRequestSize(*cluster_, request_size_); +FilterStatus Router::byteValue(uint8_t& value) { + for (auto& shadow_router : shadow_routers_) { + shadow_router.get().requestOwner().byteValue(value); + } - upstream_request_->conn_data_->connection().write(transport_buffer, false); - upstream_request_->onRequestComplete(); - return FilterStatus::Continue; + return ProtocolConverter::byteValue(value); } -void Router::onUpstreamData(Buffer::Instance& data, bool end_stream) { - ASSERT(!upstream_request_->response_complete_); +FilterStatus Router::int16Value(int16_t& value) { + for (auto& shadow_router : shadow_routers_) { + shadow_router.get().requestOwner().int16Value(value); + } - response_size_ += data.length(); + return ProtocolConverter::int16Value(value); +} - if (upstream_request_->upgrade_response_ != nullptr) { - ENVOY_STREAM_LOG(trace, "reading upgrade response: {} bytes", *callbacks_, data.length()); - // Handle upgrade response. - if (!upstream_request_->upgrade_response_->onData(data)) { - // Wait for more data. 
- return; - } +FilterStatus Router::int32Value(int32_t& value) { + for (auto& shadow_router : shadow_routers_) { + shadow_router.get().requestOwner().int32Value(value); + } - ENVOY_STREAM_LOG(debug, "upgrade response complete", *callbacks_); - upstream_request_->protocol_->completeUpgrade(*upstream_request_->conn_state_, - *upstream_request_->upgrade_response_); + return ProtocolConverter::int32Value(value); +} - upstream_request_->upgrade_response_.reset(); - upstream_request_->onRequestStart(true); - } else { - ENVOY_STREAM_LOG(trace, "reading response: {} bytes", *callbacks_, data.length()); +FilterStatus Router::int64Value(int64_t& value) { + for (auto& shadow_router : shadow_routers_) { + shadow_router.get().requestOwner().int64Value(value); + } - // Handle normal response. - if (!upstream_request_->response_started_) { - callbacks_->startUpstreamResponse(*upstream_request_->transport_, - *upstream_request_->protocol_); - upstream_request_->response_started_ = true; - } + return ProtocolConverter::int64Value(value); +} - ThriftFilters::ResponseStatus status = callbacks_->upstreamData(data); - if (status == ThriftFilters::ResponseStatus::Complete) { - ENVOY_STREAM_LOG(debug, "response complete", *callbacks_); - - recordUpstreamResponseSize(*cluster_, response_size_); - - switch (callbacks_->responseMetadata()->messageType()) { - case MessageType::Reply: - incResponseReply(*cluster_); - if (callbacks_->responseSuccess()) { - upstream_request_->upstream_host_->outlierDetector().putResult( - Upstream::Outlier::Result::ExtOriginRequestSuccess); - incResponseReplySuccess(*cluster_); - } else { - upstream_request_->upstream_host_->outlierDetector().putResult( - Upstream::Outlier::Result::ExtOriginRequestFailed); - incResponseReplyError(*cluster_); - } - break; +FilterStatus Router::doubleValue(double& value) { + for (auto& shadow_router : shadow_routers_) { + shadow_router.get().requestOwner().doubleValue(value); + } - case MessageType::Exception: - 
upstream_request_->upstream_host_->outlierDetector().putResult( - Upstream::Outlier::Result::ExtOriginRequestFailed); - incResponseException(*cluster_); - break; + return ProtocolConverter::doubleValue(value); +} - default: - incResponseInvalidType(*cluster_); - break; - } - upstream_request_->onResponseComplete(); - cleanup(); - return; - } else if (status == ThriftFilters::ResponseStatus::Reset) { - // Note: invalid responses are not accounted in the response size histogram. - ENVOY_STREAM_LOG(debug, "upstream reset", *callbacks_); - upstream_request_->upstream_host_->outlierDetector().putResult( - Upstream::Outlier::Result::ExtOriginRequestFailed); - upstream_request_->resetStream(); - return; - } +FilterStatus Router::stringValue(absl::string_view value) { + for (auto& shadow_router : shadow_routers_) { + shadow_router.get().requestOwner().stringValue(value); } - if (end_stream) { - // Response is incomplete, but no more data is coming. - ENVOY_STREAM_LOG(debug, "response underflow", *callbacks_); - upstream_request_->onResponseComplete(); - upstream_request_->onResetStream(ConnectionPool::PoolFailureReason::RemoteConnectionFailure); - cleanup(); + return ProtocolConverter::stringValue(value); +} + +FilterStatus Router::mapBegin(FieldType& key_type, FieldType& value_type, uint32_t& size) { + for (auto& shadow_router : shadow_routers_) { + shadow_router.get().requestOwner().mapBegin(key_type, value_type, size); + } + + return ProtocolConverter::mapBegin(key_type, value_type, size); +} + +FilterStatus Router::mapEnd() { + for (auto& shadow_router : shadow_routers_) { + shadow_router.get().requestOwner().mapEnd(); + } + + return ProtocolConverter::mapEnd(); +} + +FilterStatus Router::listBegin(FieldType& elem_type, uint32_t& size) { + for (auto& shadow_router : shadow_routers_) { + shadow_router.get().requestOwner().listBegin(elem_type, size); + } + + return ProtocolConverter::listBegin(elem_type, size); +} + +FilterStatus Router::listEnd() { + for (auto& 
shadow_router : shadow_routers_) { + shadow_router.get().requestOwner().listEnd(); } + + return ProtocolConverter::listEnd(); } -void Router::onEvent(Network::ConnectionEvent event) { - ASSERT(upstream_request_ && !upstream_request_->response_complete_); +FilterStatus Router::setBegin(FieldType& elem_type, uint32_t& size) { + for (auto& shadow_router : shadow_routers_) { + shadow_router.get().requestOwner().setBegin(elem_type, size); + } + + return ProtocolConverter::setBegin(elem_type, size); +} - switch (event) { - case Network::ConnectionEvent::RemoteClose: - ENVOY_STREAM_LOG(debug, "upstream remote close", *callbacks_); - upstream_request_->onResetStream(ConnectionPool::PoolFailureReason::RemoteConnectionFailure); - break; - case Network::ConnectionEvent::LocalClose: - ENVOY_STREAM_LOG(debug, "upstream local close", *callbacks_); - upstream_request_->onResetStream(ConnectionPool::PoolFailureReason::LocalConnectionFailure); - break; - default: - // Connected is consumed by the connection pool. 
- NOT_REACHED_GCOVR_EXCL_LINE; +FilterStatus Router::setEnd() { + for (auto& shadow_router : shadow_routers_) { + shadow_router.get().requestOwner().setEnd(); } - upstream_request_->releaseConnection(false); + return ProtocolConverter::setEnd(); } +void Router::onUpstreamData(Buffer::Instance& data, bool end_stream) { + const bool done = + upstream_request_->handleUpstreamData(data, end_stream, *upstream_response_callbacks_); + if (done) { + cleanup(); + } +} + +void Router::onEvent(Network::ConnectionEvent event) { upstream_request_->onEvent(event); } + const Network::Connection* Router::downstreamConnection() const { if (callbacks_ != nullptr) { return callbacks_->connection(); diff --git a/source/extensions/filters/network/thrift_proxy/router/router_impl.h b/source/extensions/filters/network/thrift_proxy/router/router_impl.h index 5410028323b88..0b60226f57dce 100644 --- a/source/extensions/filters/network/thrift_proxy/router/router_impl.h +++ b/source/extensions/filters/network/thrift_proxy/router/router_impl.h @@ -11,7 +11,6 @@ #include "envoy/tcp/conn_pool.h" #include "envoy/upstream/load_balancer.h" -#include "source/common/common/logger.h" #include "source/common/http/header_utility.h" #include "source/common/upstream/load_balancer_impl.h" #include "source/extensions/filters/network/thrift_proxy/conn_manager.h" @@ -29,6 +28,25 @@ namespace NetworkFilters { namespace ThriftProxy { namespace Router { +class RequestMirrorPolicyImpl : public RequestMirrorPolicy { +public: + RequestMirrorPolicyImpl(const std::string& cluster_name, const std::string& runtime_key, + const envoy::type::v3::FractionalPercent& default_value) + : cluster_name_(cluster_name), runtime_key_(runtime_key), default_value_(default_value) {} + + // Router::RequestMirrorPolicy + const std::string& clusterName() const override { return cluster_name_; } + bool enabled(Runtime::Loader& runtime) const override { + return runtime_key_.empty() ? 
true + : runtime.snapshot().featureEnabled(runtime_key_, default_value_); + } + +private: + const std::string cluster_name_; + const std::string runtime_key_; + const envoy::type::v3::FractionalPercent default_value_; +}; + class RouteEntryImplBase : public RouteEntry, public Route, public std::enable_shared_from_this { @@ -43,6 +61,9 @@ class RouteEntryImplBase : public RouteEntry, const RateLimitPolicy& rateLimitPolicy() const override { return rate_limit_policy_; } bool stripServiceName() const override { return strip_service_name_; }; const Http::LowerCaseString& clusterHeader() const override { return cluster_header_; } + const std::vector>& requestMirrorPolicies() const override { + return mirror_policies_; + } // Router::Route const RouteEntry* routeEntry() const override; @@ -76,6 +97,10 @@ class RouteEntryImplBase : public RouteEntry, const RateLimitPolicy& rateLimitPolicy() const override { return parent_.rateLimitPolicy(); } bool stripServiceName() const override { return parent_.stripServiceName(); } const Http::LowerCaseString& clusterHeader() const override { return parent_.clusterHeader(); } + const std::vector>& + requestMirrorPolicies() const override { + return parent_.requestMirrorPolicies(); + } // Router::Route const RouteEntry* routeEntry() const override { return this; } @@ -101,6 +126,10 @@ class RouteEntryImplBase : public RouteEntry, const RateLimitPolicy& rateLimitPolicy() const override { return parent_.rateLimitPolicy(); } bool stripServiceName() const override { return parent_.stripServiceName(); } const Http::LowerCaseString& clusterHeader() const override { return parent_.clusterHeader(); } + const std::vector>& + requestMirrorPolicies() const override { + return parent_.requestMirrorPolicies(); + } // Router::Route const RouteEntry* routeEntry() const override { return this; } @@ -110,6 +139,9 @@ class RouteEntryImplBase : public RouteEntry, const std::string cluster_name_; }; + static std::vector> buildMirrorPolicies( + const 
envoy::extensions::filters::network::thrift_proxy::v3::RouteAction& route); + const std::string cluster_name_; const std::vector config_headers_; std::vector weighted_clusters_; @@ -118,6 +150,7 @@ class RouteEntryImplBase : public RouteEntry, const RateLimitPolicyImpl rate_limit_policy_; const bool strip_service_name_; const Http::LowerCaseString cluster_header_; + const std::vector> mirror_policies_; }; using RouteEntryImplBaseConstSharedPtr = std::shared_ptr; @@ -160,15 +193,34 @@ class RouteMatcher { std::vector routes_; }; +// Adapter from DecoderFilterCallbacks to UpstreamResponseCallbacks. +class UpstreamResponseCallbacksImpl : public UpstreamResponseCallbacks { +public: + UpstreamResponseCallbacksImpl(ThriftFilters::DecoderFilterCallbacks* callbacks) + : callbacks_(callbacks) {} + + void startUpstreamResponse(Transport& transport, Protocol& protocol) override { + callbacks_->startUpstreamResponse(transport, protocol); + } + ThriftFilters::ResponseStatus upstreamData(Buffer::Instance& buffer) override { + return callbacks_->upstreamData(buffer); + } + MessageMetadataSharedPtr responseMetadata() override { return callbacks_->responseMetadata(); } + bool responseSuccess() override { return callbacks_->responseSuccess(); } + +private: + ThriftFilters::DecoderFilterCallbacks* callbacks_{}; +}; + class Router : public Tcp::ConnectionPool::UpstreamCallbacks, public Upstream::LoadBalancerContextBase, public RequestOwner, - public ThriftFilters::DecoderFilter, - Logger::Loggable { + public ThriftFilters::DecoderFilter { public: Router(Upstream::ClusterManager& cluster_manager, const std::string& stat_prefix, - Stats::Scope& scope) - : RequestOwner(cluster_manager, stat_prefix, scope), passthrough_supported_(false) {} + Stats::Scope& scope, Runtime::Loader& runtime, ShadowWriter& shadow_writer) + : RequestOwner(cluster_manager, stat_prefix, scope), passthrough_supported_(false), + runtime_(runtime), shadow_writer_(shadow_writer) {} ~Router() override = default; @@ 
-196,6 +248,25 @@ class Router : public Tcp::ConnectionPool::UpstreamCallbacks, FilterStatus transportEnd() override; FilterStatus messageBegin(MessageMetadataSharedPtr metadata) override; FilterStatus messageEnd() override; + FilterStatus passthroughData(Buffer::Instance& data) override; + FilterStatus structBegin(absl::string_view name) override; + FilterStatus structEnd() override; + FilterStatus fieldBegin(absl::string_view name, FieldType& field_type, + int16_t& field_id) override; + FilterStatus fieldEnd() override; + FilterStatus boolValue(bool& value) override; + FilterStatus byteValue(uint8_t& value) override; + FilterStatus int16Value(int16_t& value) override; + FilterStatus int32Value(int32_t& value) override; + FilterStatus int64Value(int64_t& value) override; + FilterStatus doubleValue(double& value) override; + FilterStatus stringValue(absl::string_view value) override; + FilterStatus mapBegin(FieldType& key_type, FieldType& value_type, uint32_t& size) override; + FilterStatus mapEnd() override; + FilterStatus listBegin(FieldType& elem_type, uint32_t& size) override; + FilterStatus listEnd() override; + FilterStatus setBegin(FieldType& elem_type, uint32_t& size) override; + FilterStatus setEnd() override; // Upstream::LoadBalancerContext const Network::Connection* downstreamConnection() const override; @@ -213,16 +284,18 @@ class Router : public Tcp::ConnectionPool::UpstreamCallbacks, void cleanup(); ThriftFilters::DecoderFilterCallbacks* callbacks_{}; + std::unique_ptr upstream_response_callbacks_{}; RouteConstSharedPtr route_{}; const RouteEntry* route_entry_{}; - Upstream::ClusterInfoConstSharedPtr cluster_; std::unique_ptr upstream_request_; Buffer::OwnedImpl upstream_request_buffer_; bool passthrough_supported_ : 1; uint64_t request_size_{}; - uint64_t response_size_{}; + Runtime::Loader& runtime_; + ShadowWriter& shadow_writer_; + std::vector> shadow_routers_{}; }; } // namespace Router diff --git 
a/source/extensions/filters/network/thrift_proxy/router/shadow_writer_impl.cc b/source/extensions/filters/network/thrift_proxy/router/shadow_writer_impl.cc new file mode 100644 index 0000000000000..abd51e1da50f7 --- /dev/null +++ b/source/extensions/filters/network/thrift_proxy/router/shadow_writer_impl.cc @@ -0,0 +1,301 @@ +#include "source/extensions/filters/network/thrift_proxy/router/shadow_writer_impl.h" + +#include + +#include "envoy/upstream/cluster_manager.h" +#include "envoy/upstream/thread_local_cluster.h" + +#include "source/common/common/utility.h" +#include "source/extensions/filters/network/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace ThriftProxy { +namespace Router { + +absl::optional> +ShadowWriterImpl::submit(const std::string& cluster_name, MessageMetadataSharedPtr metadata, + TransportType original_transport, ProtocolType original_protocol) { + auto shadow_router = std::make_unique(*this, cluster_name, metadata, + original_transport, original_protocol); + const bool created = shadow_router->createUpstreamRequest(); + if (!created) { + stats_.shadow_request_submit_failure_.inc(); + return absl::nullopt; + } + + auto& active_routers = tls_->getTyped().activeRouters(); + + LinkedList::moveIntoList(std::move(shadow_router), active_routers); + return *active_routers.front(); +} + +ShadowRouterImpl::ShadowRouterImpl(ShadowWriterImpl& parent, const std::string& cluster_name, + MessageMetadataSharedPtr& metadata, TransportType transport_type, + ProtocolType protocol_type) + : RequestOwner(parent.clusterManager(), parent.statPrefix(), parent.scope()), parent_(parent), + cluster_name_(cluster_name), metadata_(metadata->clone()), transport_type_(transport_type), + protocol_type_(protocol_type), + transport_(NamedTransportConfigFactory::getFactory(transport_type).createTransport()), + protocol_(NamedProtocolConfigFactory::getFactory(protocol_type).createProtocol()) { + response_decoder_ = 
std::make_unique(*transport_, *protocol_); + upstream_response_callbacks_ = + std::make_unique(*response_decoder_); +} + +Event::Dispatcher& ShadowRouterImpl::dispatcher() { return parent_.dispatcher(); } + +bool ShadowRouterImpl::createUpstreamRequest() { + auto prepare_result = + prepareUpstreamRequest(cluster_name_, metadata_, transport_type_, protocol_type_, this); + if (prepare_result.exception.has_value()) { + return false; + } + + auto& upstream_req_info = prepare_result.upstream_request_info.value(); + + upstream_request_ = + std::make_unique(*this, *upstream_req_info.conn_pool_data, metadata_, + upstream_req_info.transport, upstream_req_info.protocol); + upstream_request_->start(); + return true; +} + +bool ShadowRouterImpl::requestStarted() const { + return upstream_request_->conn_data_ != nullptr && + upstream_request_->upgrade_response_ == nullptr; +} + +void ShadowRouterImpl::flushPendingCallbacks() { + if (pending_callbacks_.empty()) { + return; + } + + for (auto& cb : pending_callbacks_) { + cb(); + } + + pending_callbacks_.clear(); +} + +FilterStatus ShadowRouterImpl::runOrSave(std::function&& cb, + const std::function& on_save) { + if (requestStarted()) { + return cb(); + } + + pending_callbacks_.push_back(std::move(cb)); + + if (on_save) { + on_save(); + } + + return FilterStatus::Continue; +} + +FilterStatus ShadowRouterImpl::passthroughData(Buffer::Instance& data) { + if (requestStarted()) { + return ProtocolConverter::passthroughData(data); + } + + auto copied = std::make_shared(data); + auto cb = [copied = std::move(copied), this]() mutable -> FilterStatus { + return ProtocolConverter::passthroughData(*copied); + }; + pending_callbacks_.push_back(std::move(cb)); + + return FilterStatus::Continue; +} + +FilterStatus ShadowRouterImpl::structBegin(absl::string_view name) { + if (requestStarted()) { + return ProtocolConverter::structBegin(name); + } + + auto cb = [name_str = std::string(name), this]() -> FilterStatus { + return 
ProtocolConverter::structBegin(absl::string_view(name_str)); + }; + pending_callbacks_.push_back(std::move(cb)); + + return FilterStatus::Continue; +} + +FilterStatus ShadowRouterImpl::structEnd() { + return runOrSave([this]() -> FilterStatus { return ProtocolConverter::structEnd(); }); +} + +FilterStatus ShadowRouterImpl::fieldBegin(absl::string_view name, FieldType& field_type, + int16_t& field_id) { + if (requestStarted()) { + return ProtocolConverter::fieldBegin(name, field_type, field_id); + } + + auto cb = [name_str = std::string(name), field_type, field_id, this]() mutable -> FilterStatus { + return ProtocolConverter::fieldBegin(absl::string_view(name_str), field_type, field_id); + }; + pending_callbacks_.push_back(std::move(cb)); + + return FilterStatus::Continue; +} + +FilterStatus ShadowRouterImpl::fieldEnd() { + return runOrSave([this]() -> FilterStatus { return ProtocolConverter::fieldEnd(); }); +} + +FilterStatus ShadowRouterImpl::boolValue(bool& value) { + return runOrSave( + [value, this]() mutable -> FilterStatus { return ProtocolConverter::boolValue(value); }); +} + +FilterStatus ShadowRouterImpl::byteValue(uint8_t& value) { + return runOrSave( + [value, this]() mutable -> FilterStatus { return ProtocolConverter::byteValue(value); }); +} + +FilterStatus ShadowRouterImpl::int16Value(int16_t& value) { + return runOrSave( + [value, this]() mutable -> FilterStatus { return ProtocolConverter::int16Value(value); }); +} + +FilterStatus ShadowRouterImpl::int32Value(int32_t& value) { + return runOrSave( + [value, this]() mutable -> FilterStatus { return ProtocolConverter::int32Value(value); }); +} + +FilterStatus ShadowRouterImpl::int64Value(int64_t& value) { + return runOrSave( + [value, this]() mutable -> FilterStatus { return ProtocolConverter::int64Value(value); }); +} + +FilterStatus ShadowRouterImpl::doubleValue(double& value) { + return runOrSave( + [value, this]() mutable -> FilterStatus { return ProtocolConverter::doubleValue(value); }); +} + 
+FilterStatus ShadowRouterImpl::stringValue(absl::string_view value) { + if (requestStarted()) { + return ProtocolConverter::stringValue(value); + } + + auto cb = [value_str = std::string(value), this]() -> FilterStatus { + return ProtocolConverter::stringValue(absl::string_view(value_str)); + }; + pending_callbacks_.push_back(std::move(cb)); + + return FilterStatus::Continue; +} + +FilterStatus ShadowRouterImpl::mapBegin(FieldType& key_type, FieldType& value_type, + uint32_t& size) { + return runOrSave([key_type, value_type, size, this]() mutable -> FilterStatus { + return ProtocolConverter::mapBegin(key_type, value_type, size); + }); +} + +FilterStatus ShadowRouterImpl::mapEnd() { + return runOrSave([this]() -> FilterStatus { return ProtocolConverter::mapEnd(); }); +} + +FilterStatus ShadowRouterImpl::listBegin(FieldType& elem_type, uint32_t& size) { + return runOrSave([elem_type, size, this]() mutable -> FilterStatus { + return ProtocolConverter::listBegin(elem_type, size); + }); +} + +FilterStatus ShadowRouterImpl::listEnd() { + return runOrSave([this]() -> FilterStatus { return ProtocolConverter::listEnd(); }); +} + +FilterStatus ShadowRouterImpl::setBegin(FieldType& elem_type, uint32_t& size) { + return runOrSave([elem_type, size, this]() mutable -> FilterStatus { + return ProtocolConverter::setBegin(elem_type, size); + }); +} + +FilterStatus ShadowRouterImpl::setEnd() { + return runOrSave([this]() -> FilterStatus { return ProtocolConverter::setEnd(); }); +} + +FilterStatus ShadowRouterImpl::messageEnd() { + auto cb = [this]() -> FilterStatus { + ASSERT(upstream_request_->conn_data_ != nullptr); + + ProtocolConverter::messageEnd(); + const auto encode_size = upstream_request_->encodeAndWrite(upstream_request_buffer_); + addSize(encode_size); + recordUpstreamRequestSize(*cluster_, request_size_); + + request_sent_ = true; + + if (metadata_->messageType() == MessageType::Oneway) { + upstream_request_->releaseConnection(false); + } + + return 
FilterStatus::Continue; + }; + + return runOrSave(std::move(cb), [this]() -> void { request_ready_ = true; }); +} + +bool ShadowRouterImpl::requestInProgress() { + const bool connection_open = upstream_request_->conn_data_ != nullptr; + const bool connection_waiting = upstream_request_->conn_pool_handle_ != nullptr; + + // Connection open and message sent. + const bool message_sent = connection_open && request_sent_; + + // Request ready to go and connection ready or almost ready. + const bool message_ready = request_ready_ && (connection_open || connection_waiting); + + return message_sent || message_ready; +} + +void ShadowRouterImpl::onRouterDestroy() { + ASSERT(!deferred_deleting_); + + // Mark the shadow request to be destroyed when the response gets back + // or the upstream connection finally fails. + router_destroyed_ = true; + + if (!requestInProgress()) { + maybeCleanup(); + } +} + +bool ShadowRouterImpl::waitingForConnection() const { + return upstream_request_->conn_pool_handle_ != nullptr; +} + +void ShadowRouterImpl::maybeCleanup() { + if (removed_) { + return; + } + + ASSERT(!deferred_deleting_); + + if (router_destroyed_) { + removed_ = true; + upstream_request_->resetStream(); + parent_.remove(*this); + } +} + +void ShadowRouterImpl::onUpstreamData(Buffer::Instance& data, bool end_stream) { + const bool done = + upstream_request_->handleUpstreamData(data, end_stream, *upstream_response_callbacks_); + if (done) { + maybeCleanup(); + } +} + +void ShadowRouterImpl::onEvent(Network::ConnectionEvent event) { + upstream_request_->onEvent(event); + maybeCleanup(); +} + +} // namespace Router +} // namespace ThriftProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/thrift_proxy/router/shadow_writer_impl.h b/source/extensions/filters/network/thrift_proxy/router/shadow_writer_impl.h new file mode 100644 index 0000000000000..dc43592b58cf9 --- /dev/null +++ 
b/source/extensions/filters/network/thrift_proxy/router/shadow_writer_impl.h @@ -0,0 +1,308 @@ +#pragma once + +#include + +#include "envoy/event/dispatcher.h" +#include "envoy/router/router.h" +#include "envoy/stats/scope.h" +#include "envoy/stats/stats_macros.h" +#include "envoy/tcp/conn_pool.h" +#include "envoy/upstream/load_balancer.h" + +#include "source/common/common/linked_object.h" +#include "source/common/common/logger.h" +#include "source/common/upstream/load_balancer_impl.h" +#include "source/extensions/filters/network/thrift_proxy/app_exception_impl.h" +#include "source/extensions/filters/network/thrift_proxy/conn_manager.h" +#include "source/extensions/filters/network/thrift_proxy/router/router.h" +#include "source/extensions/filters/network/thrift_proxy/router/upstream_request.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace ThriftProxy { +namespace Router { + +struct NullResponseDecoder : public DecoderCallbacks, public ProtocolConverter { + NullResponseDecoder(Transport& transport, Protocol& protocol) + : decoder_(std::make_unique(transport, protocol, *this)) { + initProtocolConverter(protocol, response_buffer_); + } + + virtual ThriftFilters::ResponseStatus upstreamData(Buffer::Instance& data) { + upstream_buffer_.move(data); + + bool underflow = false; + try { + underflow = onData(); + } catch (const AppException&) { + return ThriftFilters::ResponseStatus::Reset; + } catch (const EnvoyException&) { + return ThriftFilters::ResponseStatus::Reset; + } + + ASSERT(complete_ || underflow); + return complete_ ? 
ThriftFilters::ResponseStatus::Complete + : ThriftFilters::ResponseStatus::MoreData; + } + virtual bool onData() { + bool underflow = false; + decoder_->onData(upstream_buffer_, underflow); + return underflow; + } + MessageMetadataSharedPtr& responseMetadata() { + ASSERT(metadata_ != nullptr); + return metadata_; + } + bool responseSuccess() { return success_.value_or(false); } + + // ProtocolConverter + FilterStatus messageBegin(MessageMetadataSharedPtr metadata) override { + metadata_ = metadata; + first_reply_field_ = + (metadata->hasMessageType() && metadata->messageType() == MessageType::Reply); + return FilterStatus::Continue; + } + FilterStatus messageEnd() override { + if (first_reply_field_) { + success_ = true; + first_reply_field_ = false; + } + return FilterStatus::Continue; + } + FilterStatus fieldBegin(absl::string_view, FieldType&, int16_t& field_id) override { + if (first_reply_field_) { + success_ = (field_id == 0); + first_reply_field_ = false; + } + return FilterStatus::Continue; + } + FilterStatus transportBegin(MessageMetadataSharedPtr metadata) override { + UNREFERENCED_PARAMETER(metadata); + return FilterStatus::Continue; + } + FilterStatus transportEnd() override { + ASSERT(metadata_ != nullptr); + complete_ = true; + return FilterStatus::Continue; + } + + // DecoderCallbacks + DecoderEventHandler& newDecoderEventHandler() override { return *this; } + bool passthroughEnabled() const override { return true; } + + DecoderPtr decoder_; + Buffer::OwnedImpl response_buffer_; + Buffer::OwnedImpl upstream_buffer_; + MessageMetadataSharedPtr metadata_; + absl::optional success_; + bool complete_ : 1; + bool first_reply_field_ : 1; +}; +using NullResponseDecoderPtr = std::unique_ptr; + +// Adapter from NullResponseDecoder to UpstreamResponseCallbacks. 
+class ShadowUpstreamResponseCallbacksImpl : public UpstreamResponseCallbacks { +public: + ShadowUpstreamResponseCallbacksImpl(NullResponseDecoder& response_decoder) + : response_decoder_(response_decoder) {} + + void startUpstreamResponse(Transport&, Protocol&) override {} + ThriftFilters::ResponseStatus upstreamData(Buffer::Instance& buffer) override { + return response_decoder_.upstreamData(buffer); + } + MessageMetadataSharedPtr responseMetadata() override { + return response_decoder_.responseMetadata(); + } + bool responseSuccess() override { return response_decoder_.responseSuccess(); } + +private: + NullResponseDecoder& response_decoder_; +}; +using ShadowUpstreamResponseCallbacksImplPtr = std::unique_ptr; + +class ShadowWriterImpl; + +class ShadowRouterImpl : public ShadowRouterHandle, + public RequestOwner, + public Tcp::ConnectionPool::UpstreamCallbacks, + public Upstream::LoadBalancerContextBase, + public Event::DeferredDeletable, + public LinkedObject { +public: + ShadowRouterImpl(ShadowWriterImpl& parent, const std::string& cluster_name, + MessageMetadataSharedPtr& metadata, TransportType transport_type, + ProtocolType protocol_type); + ~ShadowRouterImpl() override = default; + + bool createUpstreamRequest(); + void maybeCleanup(); + void resetStream() { + if (upstream_request_ != nullptr) { + upstream_request_->releaseConnection(true); + } + } + + // ShadowRouterHandle + void onRouterDestroy() override; + bool waitingForConnection() const override; + RequestOwner& requestOwner() override { return *this; } + + // RequestOwner + Tcp::ConnectionPool::UpstreamCallbacks& upstreamCallbacks() override { return *this; } + Buffer::OwnedImpl& buffer() override { return upstream_request_buffer_; } + Event::Dispatcher& dispatcher() override; + void addSize(uint64_t size) override { request_size_ += size; } + void continueDecoding() override { flushPendingCallbacks(); } + void resetDownstreamConnection() override {} + void sendLocalReply(const 
ThriftProxy::DirectResponse&, bool) override {} + void recordResponseDuration(uint64_t value, Stats::Histogram::Unit unit) override { + recordClusterResponseDuration(*cluster_, value, unit); + } + + // RequestOwner::ProtocolConverter + FilterStatus transportBegin(MessageMetadataSharedPtr) override { return FilterStatus::Continue; } + FilterStatus transportEnd() override { return FilterStatus::Continue; } + FilterStatus messageEnd() override; + FilterStatus passthroughData(Buffer::Instance& data) override; + FilterStatus structBegin(absl::string_view name) override; + FilterStatus structEnd() override; + FilterStatus fieldBegin(absl::string_view name, FieldType& field_type, + int16_t& field_id) override; + FilterStatus fieldEnd() override; + FilterStatus boolValue(bool& value) override; + FilterStatus byteValue(uint8_t& value) override; + FilterStatus int16Value(int16_t& value) override; + FilterStatus int32Value(int32_t& value) override; + FilterStatus int64Value(int64_t& value) override; + FilterStatus doubleValue(double& value) override; + FilterStatus stringValue(absl::string_view value) override; + FilterStatus mapBegin(FieldType& key_type, FieldType& value_type, uint32_t& size) override; + FilterStatus mapEnd() override; + FilterStatus listBegin(FieldType& elem_type, uint32_t& size) override; + FilterStatus listEnd() override; + FilterStatus setBegin(FieldType& elem_type, uint32_t& size) override; + FilterStatus setEnd() override; + + // Tcp::ConnectionPool::UpstreamCallbacks + void onUpstreamData(Buffer::Instance& data, bool end_stream) override; + void onEvent(Network::ConnectionEvent event) override; + void onAboveWriteBufferHighWatermark() override {} + void onBelowWriteBufferLowWatermark() override {} + + // Upstream::LoadBalancerContextBase + const Network::Connection* downstreamConnection() const override { return nullptr; } + const Envoy::Router::MetadataMatchCriteria* metadataMatchCriteria() override { return nullptr; } + + // Event::DeferredDeletable 
+ void deleteIsPending() override { deferred_deleting_ = true; } + +private: + friend class ShadowWriterTest; + using ConverterCallback = std::function; + + void writeRequest(); + bool requestInProgress(); + bool requestStarted() const; + void flushPendingCallbacks(); + FilterStatus runOrSave(std::function&& cb, + const std::function& on_save = {}); + + ShadowWriterImpl& parent_; + const std::string cluster_name_; + MessageMetadataSharedPtr metadata_; + const TransportType transport_type_; + const ProtocolType protocol_type_; + TransportPtr transport_; + ProtocolPtr protocol_; + NullResponseDecoderPtr response_decoder_; + ShadowUpstreamResponseCallbacksImplPtr upstream_response_callbacks_; + bool router_destroyed_{}; + bool request_sent_{}; + Buffer::OwnedImpl upstream_request_buffer_; + std::unique_ptr upstream_request_; + uint64_t request_size_{}; + uint64_t response_size_{}; + bool request_ready_ : 1; + + std::list pending_callbacks_; + bool removed_{}; + bool deferred_deleting_{}; +}; + +#define ALL_SHADOW_WRITER_STATS(COUNTER, GAUGE, HISTOGRAM) COUNTER(shadow_request_submit_failure) + +struct ShadowWriterStats { + ALL_SHADOW_WRITER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_HISTOGRAM_STRUCT) +}; + +class ActiveRouters : public ThreadLocal::ThreadLocalObject { +public: + ActiveRouters(Event::Dispatcher& dispatcher) : dispatcher_(dispatcher) {} + ~ActiveRouters() override { + while (!active_routers_.empty()) { + auto& router = active_routers_.front(); + router->resetStream(); + remove(*router); + } + } + + std::list>& activeRouters() { return active_routers_; } + + void remove(ShadowRouterImpl& router) { + dispatcher_.deferredDelete(router.removeFromList(active_routers_)); + } + +private: + Event::Dispatcher& dispatcher_; + std::list> active_routers_; +}; + +class ShadowWriterImpl : public ShadowWriter, Logger::Loggable { +public: + ShadowWriterImpl(Upstream::ClusterManager& cm, const std::string& stat_prefix, + Stats::Scope& scope, 
Event::Dispatcher& dispatcher, + ThreadLocal::SlotAllocator& tls) + : cm_(cm), stat_prefix_(stat_prefix), scope_(scope), dispatcher_(dispatcher), + stats_(generateStats(stat_prefix, scope)), tls_(tls.allocateSlot()) { + + tls_->set([](Event::Dispatcher& dispatcher) -> ThreadLocal::ThreadLocalObjectSharedPtr { + return std::make_shared(dispatcher); + }); + } + + ~ShadowWriterImpl() override = default; + + void remove(ShadowRouterImpl& router) { tls_->getTyped().remove(router); } + + // Router::ShadowWriter + Upstream::ClusterManager& clusterManager() override { return cm_; } + const std::string& statPrefix() const override { return stat_prefix_; } + Stats::Scope& scope() override { return scope_; } + Event::Dispatcher& dispatcher() override { return dispatcher_; } + absl::optional> + submit(const std::string& cluster_name, MessageMetadataSharedPtr metadata, + TransportType original_transport, ProtocolType original_protocol) override; + +private: + friend class ShadowRouterImpl; + + ShadowWriterStats generateStats(const std::string& prefix, Stats::Scope& scope) { + return ShadowWriterStats{ALL_SHADOW_WRITER_STATS(POOL_COUNTER_PREFIX(scope, prefix), + POOL_GAUGE_PREFIX(scope, prefix), + POOL_HISTOGRAM_PREFIX(scope, prefix))}; + } + + Upstream::ClusterManager& cm_; + const std::string stat_prefix_; + Stats::Scope& scope_; + Event::Dispatcher& dispatcher_; + ShadowWriterStats stats_; + ThreadLocal::SlotPtr tls_; +}; + +} // namespace Router +} // namespace ThriftProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/thrift_proxy/router/upstream_request.cc b/source/extensions/filters/network/thrift_proxy/router/upstream_request.cc index 8ac684414efbe..128c0bc9cd0d3 100644 --- a/source/extensions/filters/network/thrift_proxy/router/upstream_request.cc +++ b/source/extensions/filters/network/thrift_proxy/router/upstream_request.cc @@ -100,6 +100,132 @@ void 
UpstreamRequest::onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn_ onRequestStart(continue_decoding); } +void UpstreamRequest::handleUpgradeResponse(Buffer::Instance& data) { + ENVOY_LOG(trace, "reading upgrade response: {} bytes", data.length()); + if (!upgrade_response_->onData(data)) { + // Wait for more data. + return; + } + + ENVOY_LOG(debug, "upgrade response complete"); + protocol_->completeUpgrade(*conn_state_, *upgrade_response_); + upgrade_response_.reset(); + onRequestStart(true); +} + +ThriftFilters::ResponseStatus +UpstreamRequest::handleRegularResponse(Buffer::Instance& data, + UpstreamResponseCallbacks& callbacks) { + ENVOY_LOG(trace, "reading response: {} bytes", data.length()); + + if (!response_started_) { + callbacks.startUpstreamResponse(*transport_, *protocol_); + response_started_ = true; + } + + const auto& cluster = parent_.cluster(); + + const auto status = callbacks.upstreamData(data); + if (status == ThriftFilters::ResponseStatus::Complete) { + ENVOY_LOG(debug, "response complete"); + + parent_.recordUpstreamResponseSize(cluster, response_size_); + + switch (callbacks.responseMetadata()->messageType()) { + case MessageType::Reply: + parent_.incResponseReply(cluster); + if (callbacks.responseSuccess()) { + upstream_host_->outlierDetector().putResult( + Upstream::Outlier::Result::ExtOriginRequestSuccess); + parent_.incResponseReplySuccess(cluster); + } else { + upstream_host_->outlierDetector().putResult( + Upstream::Outlier::Result::ExtOriginRequestFailed); + parent_.incResponseReplyError(cluster); + } + break; + + case MessageType::Exception: + upstream_host_->outlierDetector().putResult( + Upstream::Outlier::Result::ExtOriginRequestFailed); + parent_.incResponseException(cluster); + break; + + default: + parent_.incResponseInvalidType(cluster); + break; + } + onResponseComplete(); + } else if (status == ThriftFilters::ResponseStatus::Reset) { + // Note: invalid responses are not accounted in the response size histogram. 
+ ENVOY_LOG(debug, "upstream reset"); + upstream_host_->outlierDetector().putResult(Upstream::Outlier::Result::ExtOriginRequestFailed); + resetStream(); + } + + return status; +} + +bool UpstreamRequest::handleUpstreamData(Buffer::Instance& data, bool end_stream, + UpstreamResponseCallbacks& callbacks) { + ASSERT(!response_complete_); + + response_size_ += data.length(); + + if (upgrade_response_ != nullptr) { + handleUpgradeResponse(data); + } else { + const auto status = handleRegularResponse(data, callbacks); + if (status != ThriftFilters::ResponseStatus::MoreData) { + return true; + } + } + + if (end_stream) { + // Response is incomplete, but no more data is coming. + ENVOY_LOG(debug, "response underflow"); + onResponseComplete(); + onResetStream(ConnectionPool::PoolFailureReason::RemoteConnectionFailure); + return true; + } + + return false; +} + +void UpstreamRequest::onEvent(Network::ConnectionEvent event) { + ASSERT(!response_complete_); + + switch (event) { + case Network::ConnectionEvent::RemoteClose: + ENVOY_LOG(debug, "upstream remote close"); + onResetStream(ConnectionPool::PoolFailureReason::RemoteConnectionFailure); + break; + case Network::ConnectionEvent::LocalClose: + ENVOY_LOG(debug, "upstream local close"); + onResetStream(ConnectionPool::PoolFailureReason::LocalConnectionFailure); + break; + default: + // Connected is consumed by the connection pool. 
+ NOT_REACHED_GCOVR_EXCL_LINE; + } + + releaseConnection(false); +} + +uint64_t UpstreamRequest::encodeAndWrite(Buffer::OwnedImpl& request_buffer) { + Buffer::OwnedImpl transport_buffer; + + metadata_->setProtocol(protocol_->type()); + transport_->encodeFrame(transport_buffer, *metadata_, request_buffer); + + uint64_t size = transport_buffer.length(); + + conn_data_->connection().write(transport_buffer, false); + onRequestComplete(); + + return size; +} + void UpstreamRequest::onRequestStart(bool continue_decoding) { auto& buffer = parent_.buffer(); parent_.initProtocolConverter(*protocol_, buffer); diff --git a/source/extensions/filters/network/thrift_proxy/router/upstream_request.h b/source/extensions/filters/network/thrift_proxy/router/upstream_request.h index f5129ce583184..287610b6b98cc 100644 --- a/source/extensions/filters/network/thrift_proxy/router/upstream_request.h +++ b/source/extensions/filters/network/thrift_proxy/router/upstream_request.h @@ -3,7 +3,9 @@ #include "envoy/common/time.h" #include "envoy/tcp/conn_pool.h" +#include "source/common/common/logger.h" #include "source/extensions/filters/network/thrift_proxy/decoder_events.h" +#include "source/extensions/filters/network/thrift_proxy/filters/filter.h" #include "source/extensions/filters/network/thrift_proxy/metadata.h" #include "source/extensions/filters/network/thrift_proxy/router/router.h" #include "source/extensions/filters/network/thrift_proxy/thrift.h" @@ -14,7 +16,18 @@ namespace NetworkFilters { namespace ThriftProxy { namespace Router { -struct UpstreamRequest : public Tcp::ConnectionPool::Callbacks { +class UpstreamResponseCallbacks { +public: + virtual ~UpstreamResponseCallbacks() = default; + + virtual void startUpstreamResponse(Transport& transport, Protocol& protocol) PURE; + virtual ThriftFilters::ResponseStatus upstreamData(Buffer::Instance& buffer) PURE; + virtual MessageMetadataSharedPtr responseMetadata() PURE; + virtual bool responseSuccess() PURE; +}; + +struct 
UpstreamRequest : public Tcp::ConnectionPool::Callbacks, + Logger::Loggable { UpstreamRequest(RequestOwner& parent, Upstream::TcpPoolData& pool_data, MessageMetadataSharedPtr& metadata, TransportType transport_type, ProtocolType protocol_type); @@ -31,6 +44,13 @@ struct UpstreamRequest : public Tcp::ConnectionPool::Callbacks { void onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn, Upstream::HostDescriptionConstSharedPtr host) override; + bool handleUpstreamData(Buffer::Instance& data, bool end_stream, + UpstreamResponseCallbacks& callbacks); + void handleUpgradeResponse(Buffer::Instance& data); + ThriftFilters::ResponseStatus handleRegularResponse(Buffer::Instance& data, + UpstreamResponseCallbacks& callbacks); + uint64_t encodeAndWrite(Buffer::OwnedImpl& request_buffer); + void onEvent(Network::ConnectionEvent event); void onRequestStart(bool continue_decoding); void onRequestComplete(); void onResponseComplete(); @@ -56,6 +76,7 @@ struct UpstreamRequest : public Tcp::ConnectionPool::Callbacks { bool charged_response_timing_{false}; MonotonicTime downstream_request_complete_time_; + uint64_t response_size_{}; }; } // namespace Router diff --git a/source/extensions/filters/network/well_known_names.h b/source/extensions/filters/network/well_known_names.h index dc2027c542c74..0017db6f1ff73 100644 --- a/source/extensions/filters/network/well_known_names.h +++ b/source/extensions/filters/network/well_known_names.h @@ -26,7 +26,7 @@ class NetworkFilterNameValues { const std::string DubboProxy = "envoy.filters.network.dubbo_proxy"; // Envoy mobile http connection manager. 
const std::string EnvoyMobileHttpConnectionManager = - "envoy.filters.network.http_connection_manager"; + "envoy.filters.network.envoy_mobile_http_connection_manager"; // HTTP connection manager filter const std::string HttpConnectionManager = "envoy.filters.network.http_connection_manager"; // Local rate limit filter diff --git a/source/extensions/filters/udp/dns_filter/dns_filter.cc b/source/extensions/filters/udp/dns_filter/dns_filter.cc index f2e9e5fcf9b13..1e869093389d2 100644 --- a/source/extensions/filters/udp/dns_filter/dns_filter.cc +++ b/source/extensions/filters/udp/dns_filter/dns_filter.cc @@ -206,8 +206,7 @@ bool DnsFilterEnvoyConfig::loadServerConfig( // is thrown. If no table can be read, the filter will refer all queries to an external // DNS server, if configured, otherwise all queries will be responded to with Name Error. MessageUtil::loadFromFile(datasource.filename(), table, - ProtobufMessage::getNullValidationVisitor(), api_, - false /* do_boosting */); + ProtobufMessage::getNullValidationVisitor(), api_); data_source_loaded = true; } catch (const ProtobufMessage::UnknownProtoFieldException& e) { ENVOY_LOG(warn, "Invalid field in DNS Filter datasource configuration: {}", e.what()); diff --git a/source/extensions/formatter/metadata/BUILD b/source/extensions/formatter/metadata/BUILD new file mode 100644 index 0000000000000..8d1bf6bf39ef4 --- /dev/null +++ b/source/extensions/formatter/metadata/BUILD @@ -0,0 +1,31 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +envoy_cc_library( + name = "metadata_lib", + srcs = ["metadata.cc"], + hdrs = ["metadata.h"], + deps = [ + "//source/common/formatter:substitution_formatter_lib", + "//source/common/protobuf:utility_lib", + ], +) + +envoy_cc_extension( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + deps = [ + "//envoy/registry", + 
"//source/extensions/formatter/metadata:metadata_lib", + "@envoy_api//envoy/extensions/formatter/metadata/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/formatter/metadata/config.cc b/source/extensions/formatter/metadata/config.cc new file mode 100644 index 0000000000000..7aa27b8829100 --- /dev/null +++ b/source/extensions/formatter/metadata/config.cc @@ -0,0 +1,26 @@ +#include "source/extensions/formatter/metadata/config.h" + +#include "envoy/extensions/formatter/metadata/v3/metadata.pb.h" + +#include "source/extensions/formatter/metadata/metadata.h" + +namespace Envoy { +namespace Extensions { +namespace Formatter { + +::Envoy::Formatter::CommandParserPtr +MetadataFormatterFactory::createCommandParserFromProto(const Protobuf::Message&) { + return std::make_unique(); +} + +ProtobufTypes::MessagePtr MetadataFormatterFactory::createEmptyConfigProto() { + return std::make_unique(); +} + +std::string MetadataFormatterFactory::name() const { return "envoy.formatter.metadata"; } + +REGISTER_FACTORY(MetadataFormatterFactory, MetadataFormatterFactory::CommandParserFactory); + +} // namespace Formatter +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/formatter/metadata/config.h b/source/extensions/formatter/metadata/config.h new file mode 100644 index 0000000000000..ec080f8d806d0 --- /dev/null +++ b/source/extensions/formatter/metadata/config.h @@ -0,0 +1,19 @@ +#pragma once + +#include "source/common/formatter/substitution_formatter.h" + +namespace Envoy { +namespace Extensions { +namespace Formatter { + +class MetadataFormatterFactory : public ::Envoy::Formatter::CommandParserFactory { +public: + ::Envoy::Formatter::CommandParserPtr + createCommandParserFromProto(const Protobuf::Message&) override; + ProtobufTypes::MessagePtr createEmptyConfigProto() override; + std::string name() const override; +}; + +} // namespace Formatter +} // namespace Extensions +} // namespace Envoy diff --git 
a/source/extensions/formatter/metadata/metadata.cc b/source/extensions/formatter/metadata/metadata.cc new file mode 100644 index 0000000000000..869096f9965db --- /dev/null +++ b/source/extensions/formatter/metadata/metadata.cc @@ -0,0 +1,80 @@ +#include "source/extensions/formatter/metadata/metadata.h" + +#include + +#include "source/common/config/metadata.h" +#include "source/common/formatter/substitution_formatter.h" +#include "source/common/http/utility.h" +#include "source/common/protobuf/utility.h" +#include "source/common/runtime/runtime_features.h" + +namespace Envoy { +namespace Extensions { +namespace Formatter { + +// Metadata formatter for route's metadata. +class RouteMetadataFormatter : public ::Envoy::Formatter::MetadataFormatter { +public: + RouteMetadataFormatter(const std::string& filter_namespace, const std::vector& path, + absl::optional max_length) + : ::Envoy::Formatter::MetadataFormatter(filter_namespace, path, max_length, + [](const StreamInfo::StreamInfo& stream_info) + -> const envoy::config::core::v3::Metadata* { + auto route = stream_info.route(); + + if (route == nullptr) { + return nullptr; + } + return &route->metadata(); + }) {} +}; + +// Constructor registers all types of supported metadata along with the +// handlers accessing the required metadata type. 
+MetadataFormatterCommandParser::MetadataFormatterCommandParser() { + metadata_formatter_providers_["DYNAMIC"] = [](const std::string& filter_namespace, + const std::vector& path, + absl::optional max_length) { + return std::make_unique<::Envoy::Formatter::DynamicMetadataFormatter>(filter_namespace, path, + max_length); + }; + metadata_formatter_providers_["CLUSTER"] = [](const std::string& filter_namespace, + const std::vector& path, + absl::optional max_length) { + return std::make_unique<::Envoy::Formatter::ClusterMetadataFormatter>(filter_namespace, path, + max_length); + }; + metadata_formatter_providers_["ROUTE"] = [](const std::string& filter_namespace, + const std::vector& path, + absl::optional max_length) { + return std::make_unique(filter_namespace, path, max_length); + }; +} + +::Envoy::Formatter::FormatterProviderPtr +MetadataFormatterCommandParser::parse(const std::string& token, size_t, size_t) const { + constexpr absl::string_view METADATA_TOKEN = "METADATA("; + if (absl::StartsWith(token, METADATA_TOKEN)) { + // Extract type of metadata and keys. + std::string type, filter_namespace; + absl::optional max_length; + std::vector path; + const size_t start = METADATA_TOKEN.size(); + + ::Envoy::Formatter::SubstitutionFormatParser::parseCommand(token, start, ':', max_length, type, + filter_namespace, path); + + auto provider = metadata_formatter_providers_.find(type); + if (provider == metadata_formatter_providers_.end()) { + throw EnvoyException(absl::StrCat(type, " is not supported type of metadata")); + } + + // Return a pointer to formatter provider. 
+ return provider->second(filter_namespace, path, max_length); + } + return nullptr; +} + +} // namespace Formatter +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/formatter/metadata/metadata.h b/source/extensions/formatter/metadata/metadata.h new file mode 100644 index 0000000000000..3f27de8ad2bc7 --- /dev/null +++ b/source/extensions/formatter/metadata/metadata.h @@ -0,0 +1,32 @@ +#pragma once + +#include + +#include "envoy/config/typed_config.h" +#include "envoy/registry/registry.h" + +#include "source/common/formatter/substitution_formatter.h" + +namespace Envoy { +namespace Extensions { +namespace Formatter { + +// Access log handler for METADATA( command. +class MetadataFormatterCommandParser : public ::Envoy::Formatter::CommandParser { +public: + MetadataFormatterCommandParser(); + ::Envoy::Formatter::FormatterProviderPtr parse(const std::string& token, size_t, + size_t) const override; + +private: + // Map used to dispatch types of metadata to individual handlers which will + // access required metadata object. 
+ using FormatterProviderFunc = std::function<::Envoy::Formatter::FormatterProviderPtr( + const std::string& filter_namespace, const std::vector& path, + absl::optional max_length)>; + std::map metadata_formatter_providers_; +}; + +} // namespace Formatter +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/health_checkers/redis/utility.h b/source/extensions/health_checkers/redis/utility.h index b9c5de5848c2a..a3b90d6d2df9e 100644 --- a/source/extensions/health_checkers/redis/utility.h +++ b/source/extensions/health_checkers/redis/utility.h @@ -21,9 +21,7 @@ getRedisHealthCheckConfig(const envoy::config::core::v3::HealthCheck& health_che ProtobufTypes::MessagePtr config = ProtobufTypes::MessagePtr{new envoy::extensions::health_checkers::redis::v3::Redis()}; Envoy::Config::Utility::translateOpaqueConfig( - health_check_config.custom_health_check().typed_config(), - health_check_config.custom_health_check().hidden_envoy_deprecated_config(), - validation_visitor, *config); + health_check_config.custom_health_check().typed_config(), validation_visitor, *config); return MessageUtil::downcastAndValidate< const envoy::extensions::health_checkers::redis::v3::Redis&>(*config, validation_visitor); } diff --git a/source/extensions/io_socket/user_space/BUILD b/source/extensions/io_socket/user_space/BUILD index 334fe51a916e4..01a5948f4c9e4 100644 --- a/source/extensions/io_socket/user_space/BUILD +++ b/source/extensions/io_socket/user_space/BUILD @@ -13,6 +13,7 @@ envoy_cc_extension( name = "config", srcs = ["config.h"], deps = [ + ":io_handle_impl_lib", ], ) diff --git a/source/extensions/io_socket/user_space/io_handle_impl.cc b/source/extensions/io_socket/user_space/io_handle_impl.cc index bbdb37e3b09ef..55f425ec542cc 100644 --- a/source/extensions/io_socket/user_space/io_handle_impl.cc +++ b/source/extensions/io_socket/user_space/io_handle_impl.cc @@ -274,17 +274,38 @@ Network::IoHandlePtr IoHandleImpl::accept(struct sockaddr*, socklen_t*) { 
NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } -Api::SysCallIntResult IoHandleImpl::connect(Network::Address::InstanceConstSharedPtr) { - // Buffered Io handle should always be considered as connected. - // Use write or read to determine if peer is closed. - return {0, 0}; +Api::SysCallIntResult IoHandleImpl::connect(Network::Address::InstanceConstSharedPtr address) { + if (peer_handle_ != nullptr) { + // Buffered Io handle should always be considered as connected unless the server peer cannot be + // found. Use write or read to determine if peer is closed. + return {0, 0}; + } else { + ENVOY_LOG(debug, "user namespace handle {} connect to previously closed peer {}.", + static_cast(this), address->asStringView()); + return Api::SysCallIntResult{-1, SOCKET_ERROR_INVAL}; + } } Api::SysCallIntResult IoHandleImpl::setOption(int, int, const void*, socklen_t) { return makeInvalidSyscallResult(); } -Api::SysCallIntResult IoHandleImpl::getOption(int, int, void*, socklen_t*) { +Api::SysCallIntResult IoHandleImpl::getOption(int level, int optname, void* optval, + socklen_t* optlen) { + // Check result of connect(). It is either connected or closed. + if (level == SOL_SOCKET && optname == SO_ERROR) { + if (peer_handle_ != nullptr) { + // The peer is valid at this point. Consider it as connected. + *optlen = sizeof(int); + *static_cast(optval) = 0; + return Api::SysCallIntResult{0, 0}; + } else { + // The peer is closed. Reset the option value to non-zero.
+ *optlen = sizeof(int); + *static_cast(optval) = SOCKET_ERROR_INVAL; + return Api::SysCallIntResult{0, 0}; + } + } return makeInvalidSyscallResult(); } diff --git a/source/extensions/io_socket/user_space/io_handle_impl.h b/source/extensions/io_socket/user_space/io_handle_impl.h index 71d3248d47223..71d2161be17f3 100644 --- a/source/extensions/io_socket/user_space/io_handle_impl.h +++ b/source/extensions/io_socket/user_space/io_handle_impl.h @@ -143,6 +143,8 @@ class IoHandleImpl final : public Network::IoHandle, ASSERT(!peer_handle_); ASSERT(!write_shutdown_); peer_handle_ = writable_peer; + ENVOY_LOG(trace, "io handle {} set peer handle to {}.", static_cast(this), + static_cast(writable_peer)); } private: diff --git a/source/extensions/key_value/file_based/BUILD b/source/extensions/key_value/file_based/BUILD new file mode 100644 index 0000000000000..4603b869b5908 --- /dev/null +++ b/source/extensions/key_value/file_based/BUILD @@ -0,0 +1,25 @@ +# A file based key value store. +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +envoy_cc_extension( + name = "config_lib", + srcs = ["config.cc"], + hdrs = ["config.h"], + deps = [ + "//envoy/common:key_value_store_interface", + "//envoy/event:dispatcher_interface", + "//envoy/filesystem:filesystem_interface", + "//envoy/registry", + "//source/common/common:key_value_store_lib", + "@envoy_api//envoy/extensions/common/key_value/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/key_value/file_based/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/key_value/file_based/config.cc b/source/extensions/key_value/file_based/config.cc new file mode 100644 index 0000000000000..6fbd99b77cffc --- /dev/null +++ b/source/extensions/key_value/file_based/config.cc @@ -0,0 +1,62 @@ +#include "source/extensions/key_value/file_based/config.h" + +#include "envoy/registry/registry.h" + +namespace Envoy { +namespace 
Extensions { +namespace KeyValue { + +FileBasedKeyValueStore::FileBasedKeyValueStore(Event::Dispatcher& dispatcher, + std::chrono::seconds flush_interval, + Filesystem::Instance& file_system, + const std::string& filename) + : KeyValueStoreBase(dispatcher, flush_interval), file_system_(file_system), + filename_(filename) { + if (!file_system_.fileExists(filename_)) { + ENVOY_LOG(info, "File for key value store does not yet exist: {}", filename); + return; + } + const std::string contents = file_system_.fileReadToEnd(filename_); + if (!parseContents(contents, store_)) { + ENVOY_LOG(warn, "Failed to parse key value store file {}", filename); + } +} + +void FileBasedKeyValueStore::flush() { + static constexpr Filesystem::FlagSet DefaultFlags{1 << Filesystem::File::Operation::Write | + 1 << Filesystem::File::Operation::Create}; + Filesystem::FilePathAndType file_info{Filesystem::DestinationType::File, filename_}; + auto file = file_system_.createFile(file_info); + if (!file || !file->open(DefaultFlags).return_value_) { + ENVOY_LOG(error, "Failed to flush cache to file {}", filename_); + return; + } + for (const auto& it : store_) { + file->write(absl::StrCat(it.first.length(), "\n")); + file->write(it.first); + file->write(absl::StrCat(it.second.length(), "\n")); + file->write(it.second); + } + file->close(); +} + +KeyValueStorePtr FileBasedKeyValueStoreFactory::createStore( + const Protobuf::Message& config, ProtobufMessage::ValidationVisitor& validation_visitor, + Event::Dispatcher& dispatcher, Filesystem::Instance& file_system) { + const auto& typed_config = MessageUtil::downcastAndValidate< + const envoy::extensions::common::key_value::v3::KeyValueStoreConfig&>(config, + validation_visitor); + const auto file_config = MessageUtil::anyConvertAndValidate< + envoy::extensions::key_value::file_based::v3::FileBasedKeyValueStoreConfig>( + typed_config.config().typed_config(), validation_visitor); + auto seconds = + 
std::chrono::seconds(DurationUtil::durationToSeconds(file_config.flush_interval())); + return std::make_unique(dispatcher, seconds, file_system, + file_config.filename()); +} + +REGISTER_FACTORY(FileBasedKeyValueStoreFactory, KeyValueStoreFactory); + +} // namespace KeyValue +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/key_value/file_based/config.h b/source/extensions/key_value/file_based/config.h new file mode 100644 index 0000000000000..414b7d7473185 --- /dev/null +++ b/source/extensions/key_value/file_based/config.h @@ -0,0 +1,49 @@ +#include "envoy/common/key_value_store.h" +#include "envoy/extensions/common/key_value/v3/config.pb.h" +#include "envoy/extensions/common/key_value/v3/config.pb.validate.h" +#include "envoy/extensions/key_value/file_based/v3/config.pb.h" +#include "envoy/extensions/key_value/file_based/v3/config.pb.validate.h" + +#include "source/common/common/key_value_store_base.h" + +namespace Envoy { +namespace Extensions { +namespace KeyValue { + +// A filesystem based key value store, which loads from and flushes to the file +// provided. 
+// +// All keys and values are flushed to a single file as +// [length]\n[key][length]\n[value] +class FileBasedKeyValueStore : public KeyValueStoreBase { +public: + FileBasedKeyValueStore(Event::Dispatcher& dispatcher, std::chrono::seconds flush_interval, + Filesystem::Instance& file_system, const std::string& filename); + // KeyValueStore + void flush() override; + +private: + Filesystem::Instance& file_system_; + const std::string filename_; +}; + +class FileBasedKeyValueStoreFactory : public KeyValueStoreFactory { +public: + // KeyValueStoreFactory + KeyValueStorePtr createStore(const Protobuf::Message& config, + ProtobufMessage::ValidationVisitor& validation_visitor, + Event::Dispatcher& dispatcher, + Filesystem::Instance& file_system) override; + + // TypedFactory + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return ProtobufTypes::MessagePtr{ + new envoy::extensions::key_value::file_based::v3::FileBasedKeyValueStoreConfig()}; + } + + std::string name() const override { return "envoy.key_value.file_based"; } +}; + +} // namespace KeyValue +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/stat_sinks/hystrix/hystrix.cc b/source/extensions/stat_sinks/hystrix/hystrix.cc index 57a22f7e63a7e..1df26f9e9448f 100644 --- a/source/extensions/stat_sinks/hystrix/hystrix.cc +++ b/source/extensions/stat_sinks/hystrix/hystrix.cc @@ -320,7 +320,7 @@ Http::Code HystrixSink::handlerHystrixEventStream(absl::string_view, auto on_destroy_callback = [this, &stream_decoder_filter_callbacks]() { ENVOY_LOG(debug, "stopped sending data to hystrix dashboard on port {}", stream_decoder_filter_callbacks.connection() - ->addressProvider() + ->connectionInfoProvider() .remoteAddress() ->asString()); @@ -331,9 +331,11 @@ Http::Code HystrixSink::handlerHystrixEventStream(absl::string_view, // Add the callback to the admin_filter list of callbacks admin_stream.addOnDestroyCallback(std::move(on_destroy_callback)); - ENVOY_LOG( - debug, "started 
sending data to hystrix dashboard on port {}", - stream_decoder_filter_callbacks.connection()->addressProvider().remoteAddress()->asString()); + ENVOY_LOG(debug, "started sending data to hystrix dashboard on port {}", + stream_decoder_filter_callbacks.connection() + ->connectionInfoProvider() + .remoteAddress() + ->asString()); return Http::Code::OK; } diff --git a/source/extensions/stat_sinks/metrics_service/BUILD b/source/extensions/stat_sinks/metrics_service/BUILD index 97d79e032f030..2f43ccf235035 100644 --- a/source/extensions/stat_sinks/metrics_service/BUILD +++ b/source/extensions/stat_sinks/metrics_service/BUILD @@ -7,7 +7,7 @@ load( licenses(["notice"]) # Apache 2 -# Stats sink for the gRPC metrics service: api/envoy/service/metrics/v2/metrics_service.proto +# Stats sink for the gRPC metrics service: api/envoy/service/metrics/v3/metrics_service.proto envoy_extension_package() @@ -34,8 +34,7 @@ envoy_cc_library( "//source/common/common:assert_lib", "//source/common/config:api_version_lib", "//source/common/protobuf", - "@envoy_api//envoy/config/metrics/v2:pkg_cc_proto", - "@envoy_api//envoy/service/metrics/v2:pkg_cc_proto", + "@envoy_api//envoy/config/metrics/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/stat_sinks/metrics_service/config.cc b/source/extensions/stat_sinks/metrics_service/config.cc index 716e62fd34a7d..6e1841b0302f2 100644 --- a/source/extensions/stat_sinks/metrics_service/config.cc +++ b/source/extensions/stat_sinks/metrics_service/config.cc @@ -25,7 +25,7 @@ MetricsServiceSinkFactory::createStatsSink(const Protobuf::Message& config, MessageUtil::downcastAndValidate( config, server.messageValidationContext().staticValidationVisitor()); const auto& grpc_service = sink_config.grpc_service(); - const auto transport_api_version = Config::Utility::getAndCheckTransportVersion(sink_config); + Config::Utility::checkTransportVersion(sink_config); ENVOY_LOG(debug, "Metrics Service gRPC service configuration: {}", grpc_service.DebugString()); 
std::shared_ptr( server.clusterManager().grpcAsyncClientManager().getOrCreateRawAsyncClient( grpc_service, server.scope(), false, Grpc::CacheOption::CacheWhenRuntimeEnabled), - server.localInfo(), transport_api_version); + server.localInfo()); return std::make_unique>( diff --git a/source/extensions/stat_sinks/metrics_service/grpc_metrics_proto_descriptors.cc b/source/extensions/stat_sinks/metrics_service/grpc_metrics_proto_descriptors.cc index 3987969712f28..0a9626db01e35 100644 --- a/source/extensions/stat_sinks/metrics_service/grpc_metrics_proto_descriptors.cc +++ b/source/extensions/stat_sinks/metrics_service/grpc_metrics_proto_descriptors.cc @@ -1,7 +1,6 @@ #include "source/extensions/stat_sinks/metrics_service/grpc_metrics_proto_descriptors.h" -#include "envoy/config/metrics/v2/metrics_service.pb.h" -#include "envoy/service/metrics/v2/metrics_service.pb.h" +#include "envoy/config/metrics/v3/metrics_service.pb.h" #include "source/common/common/assert.h" #include "source/common/common/fmt.h" @@ -14,19 +13,12 @@ namespace StatSinks { namespace MetricsService { void validateProtoDescriptors() { - // https://github.com/envoyproxy/envoy/issues/9639 - const API_NO_BOOST(envoy::service::metrics::v2::StreamMetricsMessage) _dummy_service_v2; - // https://github.com/envoyproxy/envoy/pull/9618 made it necessary to register the previous - // version's config descriptor by calling ApiTypeOracle::getEarlierVersionDescriptor which has an - // assertion for nullptr types. 
- const API_NO_BOOST(envoy::config::metrics::v2::MetricsServiceConfig) _dummy_config_v2; - - const auto method = "envoy.service.metrics.v2.MetricsService.StreamMetrics"; + const auto method = "envoy.service.metrics.v3.MetricsService.StreamMetrics"; RELEASE_ASSERT(Protobuf::DescriptorPool::generated_pool()->FindMethodByName(method) != nullptr, ""); - const auto config = "envoy.config.metrics.v2.MetricsServiceConfig"; + const auto config = "envoy.config.metrics.v3.MetricsServiceConfig"; // Keeping this as an ASSERT because ApiTypeOracle::getEarlierVersionDescriptor also has an // ASSERT. diff --git a/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.cc b/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.cc index 5e5c602e206f7..8c6cebeecccd0 100644 --- a/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.cc +++ b/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.cc @@ -18,17 +18,13 @@ namespace Extensions { namespace StatSinks { namespace MetricsService { -GrpcMetricsStreamerImpl::GrpcMetricsStreamerImpl( - Grpc::RawAsyncClientSharedPtr raw_async_client, const LocalInfo::LocalInfo& local_info, - envoy::config::core::v3::ApiVersion transport_api_version) +GrpcMetricsStreamerImpl::GrpcMetricsStreamerImpl(Grpc::RawAsyncClientSharedPtr raw_async_client, + const LocalInfo::LocalInfo& local_info) : GrpcMetricsStreamer(raw_async_client), local_info_(local_info), - service_method_( - Grpc::VersionedMethods("envoy.service.metrics.v3.MetricsService.StreamMetrics", - "envoy.service.metrics.v2.MetricsService.StreamMetrics") - .getMethodDescriptorForVersion(transport_api_version)), - transport_api_version_(transport_api_version) {} + service_method_(*Protobuf::DescriptorPool::generated_pool()->FindMethodByName( + "envoy.service.metrics.v3.MetricsService.StreamMetrics")) {} void GrpcMetricsStreamerImpl::send(MetricsPtr&& metrics) { envoy::service::metrics::v3::StreamMetricsMessage message; @@ -42,7 
+38,7 @@ void GrpcMetricsStreamerImpl::send(MetricsPtr&& metrics) { *identifier->mutable_node() = local_info_.node(); } if (stream_ != nullptr) { - stream_->sendMessage(message, transport_api_version_, false); + stream_->sendMessage(message, false); } } diff --git a/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.h b/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.h index 90496ec06edad..5a1bab6bcf527 100644 --- a/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.h +++ b/source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.h @@ -64,8 +64,7 @@ class GrpcMetricsStreamerImpl envoy::service::metrics::v3::StreamMetricsResponse> { public: GrpcMetricsStreamerImpl(Grpc::RawAsyncClientSharedPtr raw_async_client, - const LocalInfo::LocalInfo& local_info, - envoy::config::core::v3::ApiVersion transport_api_version); + const LocalInfo::LocalInfo& local_info); // GrpcMetricsStreamer void send(MetricsPtr&& metrics) override; @@ -76,7 +75,6 @@ class GrpcMetricsStreamerImpl private: const LocalInfo::LocalInfo& local_info_; const Protobuf::MethodDescriptor& service_method_; - const envoy::config::core::v3::ApiVersion transport_api_version_; }; using GrpcMetricsStreamerImplPtr = std::unique_ptr; diff --git a/source/extensions/tracers/common/ot/opentracing_driver_impl.cc b/source/extensions/tracers/common/ot/opentracing_driver_impl.cc index aa1856a218003..e5c37a4fd060c 100644 --- a/source/extensions/tracers/common/ot/opentracing_driver_impl.cc +++ b/source/extensions/tracers/common/ot/opentracing_driver_impl.cc @@ -34,7 +34,7 @@ class OpenTracingHeadersWriter : public opentracing::HTTPHeadersWriter { opentracing::expected Set(opentracing::string_view key, opentracing::string_view value) const override { Http::LowerCaseString lowercase_key{{key.data(), key.size()}}; - trace_context_.setTraceContext(lowercase_key, {value.data(), value.size()}); + trace_context_.setByKey(lowercase_key, {value.data(), 
value.size()}); return {}; } @@ -57,7 +57,7 @@ class OpenTracingHeadersReader : public opentracing::HTTPHeadersReader { opentracing::expected LookupKey(opentracing::string_view key) const override { Http::LowerCaseString lowercase_key{{key.data(), key.size()}}; - const auto entry = trace_context_.getTraceContext(lowercase_key); + const auto entry = trace_context_.getByKey(lowercase_key); if (entry.has_value()) { return opentracing::string_view{entry.value().data(), entry.value().length()}; } else { @@ -66,34 +66,16 @@ class OpenTracingHeadersReader : public opentracing::HTTPHeadersReader { } opentracing::expected ForeachKey(OpenTracingCb f) const override { - // TODO(wbpcode): TraceContext currently does not provide an API to traverse all entries. So - // dynamic_cast has to be used here. This is a temporary compromise to ensure that the existing - // functions are correct. After TraceContext provides the iterative API, this part of the code - // needs to be rewritten. - const auto headers = dynamic_cast(&trace_context_); - if (headers != nullptr) { - headers->iterate(headerMapCallback(f)); - } + trace_context_.forEach([cb = std::move(f)](absl::string_view key, absl::string_view val) { + opentracing::string_view opentracing_key{key.data(), key.length()}; + opentracing::string_view opentracing_val{val.data(), val.length()}; + return static_cast(cb(opentracing_key, opentracing_val)); + }); return {}; } private: const Tracing::TraceContext& trace_context_; - - static Http::HeaderMap::ConstIterateCb headerMapCallback(OpenTracingCb callback) { - return [callback = - std::move(callback)](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { - opentracing::string_view key{header.key().getStringView().data(), - header.key().getStringView().length()}; - opentracing::string_view value{header.value().getStringView().data(), - header.value().getStringView().length()}; - if (callback(key, value)) { - return Http::HeaderMap::Iterate::Continue; - } else { - return 
Http::HeaderMap::Iterate::Break; - } - }; - } }; } // namespace @@ -137,7 +119,7 @@ void OpenTracingSpan::injectContext(Tracing::TraceContext& trace_context) { return; } const std::string current_span_context = oss.str(); - trace_context.setTraceContextReferenceKey( + trace_context.setByReferenceKey( Http::CustomHeaders::get().OtSpanContext, Base64::encode(current_span_context.c_str(), current_span_context.length())); } else { @@ -178,7 +160,7 @@ Tracing::SpanPtr OpenTracingDriver::startSpan(const Tracing::Config& config, std::unique_ptr active_span; std::unique_ptr parent_span_ctx; - const auto entry = trace_context.getTraceContext(Http::CustomHeaders::get().OtSpanContext); + const auto entry = trace_context.getByKey(Http::CustomHeaders::get().OtSpanContext); if (propagation_mode == PropagationMode::SingleHeader && entry.has_value()) { opentracing::expected> parent_span_ctx_maybe; std::string parent_context = Base64::decode(std::string(entry.value())); diff --git a/source/extensions/tracers/opencensus/opencensus_tracer_impl.cc b/source/extensions/tracers/opencensus/opencensus_tracer_impl.cc index 5a8e85dd3f43f..a6319fd7da701 100644 --- a/source/extensions/tracers/opencensus/opencensus_tracer_impl.cc +++ b/source/extensions/tracers/opencensus/opencensus_tracer_impl.cc @@ -95,7 +95,7 @@ startSpanHelper(const std::string& name, bool traced, const Tracing::TraceContex bool found = false; switch (incoming) { case OpenCensusConfig::TRACE_CONTEXT: { - const auto entry = trace_context.getTraceContext(Constants::get().TRACEPARENT); + const auto entry = trace_context.getByKey(Constants::get().TRACEPARENT); if (entry.has_value()) { found = true; // This is an implicitly untrusted header, so only the first value is used. 
@@ -104,7 +104,7 @@ startSpanHelper(const std::string& name, bool traced, const Tracing::TraceContex break; } case OpenCensusConfig::GRPC_TRACE_BIN: { - const auto entry = trace_context.getTraceContext(Constants::get().GRPC_TRACE_BIN); + const auto entry = trace_context.getByKey(Constants::get().GRPC_TRACE_BIN); if (entry.has_value()) { found = true; // This is an implicitly untrusted header, so only the first value is used. @@ -114,7 +114,7 @@ startSpanHelper(const std::string& name, bool traced, const Tracing::TraceContex break; } case OpenCensusConfig::CLOUD_TRACE_CONTEXT: { - const auto entry = trace_context.getTraceContext(Constants::get().X_CLOUD_TRACE_CONTEXT); + const auto entry = trace_context.getByKey(Constants::get().X_CLOUD_TRACE_CONTEXT); if (entry.has_value()) { found = true; // This is an implicitly untrusted header, so only the first value is used. @@ -128,19 +128,19 @@ startSpanHelper(const std::string& name, bool traced, const Tracing::TraceContex absl::string_view b3_span_id; absl::string_view b3_sampled; absl::string_view b3_flags; - const auto h_b3_trace_id = trace_context.getTraceContext(Constants::get().X_B3_TRACEID); + const auto h_b3_trace_id = trace_context.getByKey(Constants::get().X_B3_TRACEID); if (h_b3_trace_id.has_value()) { b3_trace_id = h_b3_trace_id.value(); } - const auto h_b3_span_id = trace_context.getTraceContext(Constants::get().X_B3_SPANID); + const auto h_b3_span_id = trace_context.getByKey(Constants::get().X_B3_SPANID); if (h_b3_span_id.has_value()) { b3_span_id = h_b3_span_id.value(); } - const auto h_b3_sampled = trace_context.getTraceContext(Constants::get().X_B3_SAMPLED); + const auto h_b3_sampled = trace_context.getByKey(Constants::get().X_B3_SAMPLED); if (h_b3_sampled.has_value()) { b3_sampled = h_b3_sampled.value(); } - const auto h_b3_flags = trace_context.getTraceContext(Constants::get().X_B3_FLAGS); + const auto h_b3_flags = trace_context.getByKey(Constants::get().X_B3_FLAGS); if (h_b3_flags.has_value()) { 
b3_flags = h_b3_flags.value(); } @@ -207,27 +207,27 @@ void Span::injectContext(Tracing::TraceContext& trace_context) { for (const auto& outgoing : oc_config_.outgoing_trace_context()) { switch (outgoing) { case OpenCensusConfig::TRACE_CONTEXT: - trace_context.setTraceContextReferenceKey( - Constants::get().TRACEPARENT, ::opencensus::trace::propagation::ToTraceParentHeader(ctx)); + trace_context.setByReferenceKey(Constants::get().TRACEPARENT, + ::opencensus::trace::propagation::ToTraceParentHeader(ctx)); break; case OpenCensusConfig::GRPC_TRACE_BIN: { std::string val = ::opencensus::trace::propagation::ToGrpcTraceBinHeader(ctx); val = Base64::encode(val.data(), val.size(), /*add_padding=*/false); - trace_context.setTraceContextReferenceKey(Constants::get().GRPC_TRACE_BIN, val); + trace_context.setByReferenceKey(Constants::get().GRPC_TRACE_BIN, val); break; } case OpenCensusConfig::CLOUD_TRACE_CONTEXT: - trace_context.setTraceContextReferenceKey( + trace_context.setByReferenceKey( Constants::get().X_CLOUD_TRACE_CONTEXT, ::opencensus::trace::propagation::ToCloudTraceContextHeader(ctx)); break; case OpenCensusConfig::B3: - trace_context.setTraceContextReferenceKey( - Constants::get().X_B3_TRACEID, ::opencensus::trace::propagation::ToB3TraceIdHeader(ctx)); - trace_context.setTraceContextReferenceKey( - Constants::get().X_B3_SPANID, ::opencensus::trace::propagation::ToB3SpanIdHeader(ctx)); - trace_context.setTraceContextReferenceKey( - Constants::get().X_B3_SAMPLED, ::opencensus::trace::propagation::ToB3SampledHeader(ctx)); + trace_context.setByReferenceKey(Constants::get().X_B3_TRACEID, + ::opencensus::trace::propagation::ToB3TraceIdHeader(ctx)); + trace_context.setByReferenceKey(Constants::get().X_B3_SPANID, + ::opencensus::trace::propagation::ToB3SpanIdHeader(ctx)); + trace_context.setByReferenceKey(Constants::get().X_B3_SAMPLED, + ::opencensus::trace::propagation::ToB3SampledHeader(ctx)); // OpenCensus's trace context propagation doesn't produce the // "X-B3-Flags:" 
header. break; diff --git a/source/extensions/tracers/skywalking/skywalking_tracer_impl.cc b/source/extensions/tracers/skywalking/skywalking_tracer_impl.cc index 93a34abd254ff..81489fc2ffcde 100644 --- a/source/extensions/tracers/skywalking/skywalking_tracer_impl.cc +++ b/source/extensions/tracers/skywalking/skywalking_tracer_impl.cc @@ -51,7 +51,7 @@ Tracing::SpanPtr Driver::startSpan(const Tracing::Config& config, auto& tracer = tls_slot_ptr_->getTyped().tracer(); TracingContextPtr tracing_context; // TODO(shikugawa): support extension span header. - auto propagation_header = trace_context.getTraceContext(skywalkingPropagationHeaderKey()); + auto propagation_header = trace_context.getByKey(skywalkingPropagationHeaderKey()); if (!propagation_header.has_value()) { tracing_context = tracing_context_factory_->create(); // Sampling status is always true on SkyWalking. But with disabling skip_analysis, diff --git a/source/extensions/tracers/skywalking/tracer.cc b/source/extensions/tracers/skywalking/tracer.cc index f66cc83545df1..953d04220ebcb 100644 --- a/source/extensions/tracers/skywalking/tracer.cc +++ b/source/extensions/tracers/skywalking/tracer.cc @@ -45,16 +45,14 @@ void Span::finishSpan() { } void Span::injectContext(Tracing::TraceContext& trace_context) { - const auto host = trace_context.getTraceContext(Http::Headers::get().HostLegacy).value_or(""); - // TODO(wbpcode): Due to https://github.com/SkyAPM/cpp2sky/issues/83 in cpp2sky, it is necessary // to ensure that there is '\0' at the end of the string_view parameter to ensure that the // corresponding trace header is generated correctly. For this reason, we cannot directly use host // as argument. We need create a copy of std::string based on host and std::string will // automatically add '\0' to the end of the string content. 
- auto sw8_header = tracing_context_->createSW8HeaderValue(std::string(host)); + auto sw8_header = tracing_context_->createSW8HeaderValue(std::string(trace_context.authority())); if (sw8_header.has_value()) { - trace_context.setTraceContextReferenceKey(skywalkingPropagationHeaderKey(), sw8_header.value()); + trace_context.setByReferenceKey(skywalkingPropagationHeaderKey(), sw8_header.value()); } } diff --git a/source/extensions/tracers/xray/tracer.cc b/source/extensions/tracers/xray/tracer.cc index 6c033ef31b3bc..76792fb4cc16d 100644 --- a/source/extensions/tracers/xray/tracer.cc +++ b/source/extensions/tracers/xray/tracer.cc @@ -103,7 +103,7 @@ void Span::finishSpan() { void Span::injectContext(Tracing::TraceContext& trace_context) { const std::string xray_header_value = fmt::format("Root={};Parent={};Sampled={}", traceId(), id(), sampled() ? "1" : "0"); - trace_context.setTraceContextReferenceKey(XRayTraceHeader, xray_header_value); + trace_context.setByReferenceKey(XRayTraceHeader, xray_header_value); } Tracing::SpanPtr Span::spawnChild(const Tracing::Config&, const std::string& operation_name, diff --git a/source/extensions/tracers/xray/xray_tracer_impl.cc b/source/extensions/tracers/xray/xray_tracer_impl.cc index 964b982cba8ee..34d260d15668a 100644 --- a/source/extensions/tracers/xray/xray_tracer_impl.cc +++ b/source/extensions/tracers/xray/xray_tracer_impl.cc @@ -77,7 +77,7 @@ Tracing::SpanPtr Driver::startSpan(const Tracing::Config& config, UNREFERENCED_PARAMETER(config); // TODO(marcomagdy) - how do we factor this into the logic above UNREFERENCED_PARAMETER(tracing_decision); - const auto header = trace_context.getTraceContext(XRayTraceHeader); + const auto header = trace_context.getByKey(XRayTraceHeader); absl::optional should_trace; XRayHeader xray_header; if (header.has_value()) { @@ -98,10 +98,8 @@ Tracing::SpanPtr Driver::startSpan(const Tracing::Config& config, } if (!should_trace.has_value()) { - const SamplingRequest request{ - 
trace_context.getTraceContext(Http::Headers::get().HostLegacy).value_or(""), - trace_context.getTraceContext(Http::Headers::get().Method).value_or(""), - trace_context.getTraceContext(Http::Headers::get().Path).value_or("")}; + const SamplingRequest request{trace_context.authority(), trace_context.method(), + trace_context.path()}; should_trace = sampling_strategy_->shouldTrace(request); } diff --git a/source/extensions/tracers/zipkin/span_buffer.cc b/source/extensions/tracers/zipkin/span_buffer.cc index e5ab41876cca8..603a9129c0def 100644 --- a/source/extensions/tracers/zipkin/span_buffer.cc +++ b/source/extensions/tracers/zipkin/span_buffer.cc @@ -49,7 +49,9 @@ SerializerPtr SpanBuffer::makeSerializer( const bool shared_span_context) { switch (version) { case envoy::config::trace::v3::ZipkinConfig::hidden_envoy_deprecated_HTTP_JSON_V1: - return std::make_unique(); + throw EnvoyException( + "hidden_envoy_deprecated_HTTP_JSON_V1 has been deprecated. Please use a non-default " + "envoy::config::trace::v3::ZipkinConfig::CollectorEndpointVersion value."); case envoy::config::trace::v3::ZipkinConfig::HTTP_JSON: return std::make_unique(shared_span_context); case envoy::config::trace::v3::ZipkinConfig::HTTP_PROTO: @@ -59,14 +61,6 @@ SerializerPtr SpanBuffer::makeSerializer( } } -std::string JsonV1Serializer::serialize(const std::vector& zipkin_spans) { - const std::string serialized_elements = - absl::StrJoin(zipkin_spans, ",", [](std::string* element, const Span& zipkin_span) { - absl::StrAppend(element, zipkin_span.toJson()); - }); - return absl::StrCat("[", serialized_elements, "]"); -} - JsonV2Serializer::JsonV2Serializer(const bool shared_span_context) : shared_span_context_{shared_span_context} {} diff --git a/source/extensions/tracers/zipkin/span_buffer.h b/source/extensions/tracers/zipkin/span_buffer.h index c1667c609078a..df1536044367f 100644 --- a/source/extensions/tracers/zipkin/span_buffer.h +++ b/source/extensions/tracers/zipkin/span_buffer.h @@ -24,7 +24,7 
@@ class SpanBuffer { * the method allocateBuffer(size). * * @param version The selected Zipkin collector version. @see - * api/envoy/config/trace/v2/trace.proto. + * api/envoy/config/trace/v3/trace.proto. * @param shared_span_context To determine whether client and server spans will share the same * span context. */ @@ -35,7 +35,7 @@ class SpanBuffer { * Constructor that initializes a buffer with the given size. * * @param version The selected Zipkin collector version. @see - * api/envoy/config/trace/v2/trace.proto. + * api/envoy/config/trace/v3/trace.proto. * @param shared_span_context To determine whether client and server spans will share the same * span context. * @param size The desired buffer size. @@ -92,21 +92,6 @@ class SpanBuffer { using SpanBufferPtr = std::unique_ptr; -/** - * JsonV1Serializer implements Zipkin::Serializer that serializes list of Zipkin spans into JSON - * Zipkin v1 array. - */ -class JsonV1Serializer : public Serializer { -public: - JsonV1Serializer() = default; - - /** - * Serialize list of Zipkin spans into Zipkin v1 JSON array. - * @return std::string serialized pending spans as Zipkin v1 JSON array. - */ - std::string serialize(const std::vector& pending_spans) override; -}; - /** * JsonV2Serializer implements Zipkin::Serializer that serializes list of Zipkin spans into JSON * Zipkin v2 array. 
diff --git a/source/extensions/tracers/zipkin/span_context_extractor.cc b/source/extensions/tracers/zipkin/span_context_extractor.cc index 9e1a40415a339..d51833f2e8b4f 100644 --- a/source/extensions/tracers/zipkin/span_context_extractor.cc +++ b/source/extensions/tracers/zipkin/span_context_extractor.cc @@ -35,7 +35,7 @@ SpanContextExtractor::~SpanContextExtractor() = default; bool SpanContextExtractor::extractSampled(const Tracing::Decision tracing_decision) { bool sampled(false); - auto b3_header_entry = trace_context_.getTraceContext(ZipkinCoreConstants::get().B3); + auto b3_header_entry = trace_context_.getByKey(ZipkinCoreConstants::get().B3); if (b3_header_entry.has_value()) { // This is an implicitly untrusted header, so only the first value is used. absl::string_view b3 = b3_header_entry.value(); @@ -61,7 +61,7 @@ bool SpanContextExtractor::extractSampled(const Tracing::Decision tracing_decisi return getSamplingFlags(b3[sampled_pos], tracing_decision); } - auto x_b3_sampled_entry = trace_context_.getTraceContext(ZipkinCoreConstants::get().X_B3_SAMPLED); + auto x_b3_sampled_entry = trace_context_.getByKey(ZipkinCoreConstants::get().X_B3_SAMPLED); if (!x_b3_sampled_entry.has_value()) { return tracing_decision.traced; } @@ -74,7 +74,7 @@ bool SpanContextExtractor::extractSampled(const Tracing::Decision tracing_decisi } std::pair SpanContextExtractor::extractSpanContext(bool is_sampled) { - if (trace_context_.getTraceContext(ZipkinCoreConstants::get().B3).has_value()) { + if (trace_context_.getByKey(ZipkinCoreConstants::get().B3).has_value()) { return extractSpanContextFromB3SingleFormat(is_sampled); } uint64_t trace_id(0); @@ -82,8 +82,8 @@ std::pair SpanContextExtractor::extractSpanContext(bool is_sa uint64_t span_id(0); uint64_t parent_id(0); - auto b3_trace_id_entry = trace_context_.getTraceContext(ZipkinCoreConstants::get().X_B3_TRACE_ID); - auto b3_span_id_entry = trace_context_.getTraceContext(ZipkinCoreConstants::get().X_B3_SPAN_ID); + auto 
b3_trace_id_entry = trace_context_.getByKey(ZipkinCoreConstants::get().X_B3_TRACE_ID); + auto b3_span_id_entry = trace_context_.getByKey(ZipkinCoreConstants::get().X_B3_SPAN_ID); if (b3_span_id_entry.has_value() && b3_trace_id_entry.has_value()) { // Extract trace id - which can either be 128 or 64 bit. For 128 bit, // it needs to be divided into two 64 bit numbers (high and low). @@ -108,7 +108,7 @@ std::pair SpanContextExtractor::extractSpanContext(bool is_sa } auto b3_parent_id_entry = - trace_context_.getTraceContext(ZipkinCoreConstants::get().X_B3_PARENT_SPAN_ID); + trace_context_.getByKey(ZipkinCoreConstants::get().X_B3_PARENT_SPAN_ID); if (b3_parent_id_entry.has_value() && !b3_parent_id_entry.value().empty()) { // This is an implicitly untrusted header, so only the first value is used. const std::string pspid(b3_parent_id_entry.value()); @@ -125,7 +125,7 @@ std::pair SpanContextExtractor::extractSpanContext(bool is_sa std::pair SpanContextExtractor::extractSpanContextFromB3SingleFormat(bool is_sampled) { - auto b3_head_entry = trace_context_.getTraceContext(ZipkinCoreConstants::get().B3); + auto b3_head_entry = trace_context_.getByKey(ZipkinCoreConstants::get().B3); ASSERT(b3_head_entry.has_value()); // This is an implicitly untrusted header, so only the first value is used. 
const std::string b3(b3_head_entry.value()); diff --git a/source/extensions/tracers/zipkin/zipkin_core_constants.h b/source/extensions/tracers/zipkin/zipkin_core_constants.h index 03e1ba63a40b6..ee82dc36055a9 100644 --- a/source/extensions/tracers/zipkin/zipkin_core_constants.h +++ b/source/extensions/tracers/zipkin/zipkin_core_constants.h @@ -37,7 +37,6 @@ constexpr char SERVER_ADDR[] = "sa"; constexpr char SAMPLED[] = "1"; constexpr char NOT_SAMPLED[] = "0"; -constexpr char DEFAULT_COLLECTOR_ENDPOINT[] = "/api/v1/spans"; constexpr bool DEFAULT_SHARED_SPAN_CONTEXT = true; } // namespace diff --git a/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc b/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc index 9e87e1343e27e..33e0327b26fc0 100644 --- a/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc +++ b/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc @@ -41,20 +41,19 @@ std::string ZipkinSpan::getBaggage(absl::string_view) { return EMPTY_STRING; } void ZipkinSpan::injectContext(Tracing::TraceContext& trace_context) { // Set the trace-id and span-id headers properly, based on the newly-created span structure. - trace_context.setTraceContextReferenceKey(ZipkinCoreConstants::get().X_B3_TRACE_ID, - span_.traceIdAsHexString()); - trace_context.setTraceContextReferenceKey(ZipkinCoreConstants::get().X_B3_SPAN_ID, - span_.idAsHexString()); + trace_context.setByReferenceKey(ZipkinCoreConstants::get().X_B3_TRACE_ID, + span_.traceIdAsHexString()); + trace_context.setByReferenceKey(ZipkinCoreConstants::get().X_B3_SPAN_ID, span_.idAsHexString()); // Set the parent-span header properly, based on the newly-created span structure. if (span_.isSetParentId()) { - trace_context.setTraceContextReferenceKey(ZipkinCoreConstants::get().X_B3_PARENT_SPAN_ID, - span_.parentIdAsHexString()); + trace_context.setByReferenceKey(ZipkinCoreConstants::get().X_B3_PARENT_SPAN_ID, + span_.parentIdAsHexString()); } // Set the sampled header. 
- trace_context.setTraceContextReferenceKey(ZipkinCoreConstants::get().X_B3_SAMPLED, - span_.sampled() ? SAMPLED : NOT_SAMPLED); + trace_context.setByReferenceKey(ZipkinCoreConstants::get().X_B3_SAMPLED, + span_.sampled() ? SAMPLED : NOT_SAMPLED); } void ZipkinSpan::setSampled(bool sampled) { span_.setSampled(sampled); } @@ -88,7 +87,7 @@ Driver::Driver(const envoy::config::trace::v3::ZipkinConfig& zipkin_config, if (!zipkin_config.collector_endpoint().empty()) { collector.endpoint_ = zipkin_config.collector_endpoint(); } - // The current default version of collector_endpoint_version is HTTP_JSON_V1. + // The current default version of collector_endpoint_version is HTTP_JSON. collector.version_ = zipkin_config.collector_endpoint_version(); const bool trace_id_128bit = zipkin_config.trace_id_128bit(); @@ -119,16 +118,12 @@ Tracing::SpanPtr Driver::startSpan(const Tracing::Config& config, auto ret_span_context = extractor.extractSpanContext(sampled); if (!ret_span_context.second) { // Create a root Zipkin span. No context was found in the headers. 
- new_zipkin_span = tracer.startSpan( - config, - std::string(trace_context.getTraceContext(Http::Headers::get().HostLegacy).value_or("")), - start_time); + new_zipkin_span = + tracer.startSpan(config, std::string(trace_context.authority()), start_time); new_zipkin_span->setSampled(sampled); } else { - new_zipkin_span = tracer.startSpan( - config, - std::string(trace_context.getTraceContext(Http::Headers::get().HostLegacy).value_or("")), - start_time, ret_span_context.first); + new_zipkin_span = tracer.startSpan(config, std::string(trace_context.authority()), start_time, + ret_span_context.first); } } catch (const ExtractorException& e) { diff --git a/source/extensions/tracers/zipkin/zipkin_tracer_impl.h b/source/extensions/tracers/zipkin/zipkin_tracer_impl.h index 9e000cafd6601..a416c3268a4b3 100644 --- a/source/extensions/tracers/zipkin/zipkin_tracer_impl.h +++ b/source/extensions/tracers/zipkin/zipkin_tracer_impl.h @@ -158,15 +158,12 @@ class Driver : public Tracing::Driver { * Information about the Zipkin collector. */ struct CollectorInfo { - // The Zipkin collector endpoint/path to receive the collected trace data. e.g. /api/v1/spans if - // HTTP_JSON_V1 or /api/v2/spans otherwise. - std::string endpoint_{DEFAULT_COLLECTOR_ENDPOINT}; + // The Zipkin collector endpoint/path to receive the collected trace data. + std::string endpoint_; // The version of the collector. This is related to endpoint's supported payload specification and - // transport. Currently it defaults to envoy::config::trace::v2::ZipkinConfig::HTTP_JSON_V1. In - // the future, we will throw when collector_endpoint_version is not specified. - envoy::config::trace::v3::ZipkinConfig::CollectorEndpointVersion version_{ - envoy::config::trace::v3::ZipkinConfig::hidden_envoy_deprecated_HTTP_JSON_V1}; + // transport. 
+ envoy::config::trace::v3::ZipkinConfig::CollectorEndpointVersion version_; bool shared_span_context_{DEFAULT_SHARED_SPAN_CONTEXT}; }; diff --git a/source/extensions/transport_sockets/alts/tsi_socket.cc b/source/extensions/transport_sockets/alts/tsi_socket.cc index a3a5c15f0a6bd..5689fd7f8d226 100644 --- a/source/extensions/transport_sockets/alts/tsi_socket.cc +++ b/source/extensions/transport_sockets/alts/tsi_socket.cc @@ -56,9 +56,10 @@ void TsiSocket::doHandshakeNext() { raw_read_buffer_.length()); if (!handshaker_) { - handshaker_ = handshaker_factory_(callbacks_->connection().dispatcher(), - callbacks_->connection().addressProvider().localAddress(), - callbacks_->connection().addressProvider().remoteAddress()); + handshaker_ = + handshaker_factory_(callbacks_->connection().dispatcher(), + callbacks_->connection().connectionInfoProvider().localAddress(), + callbacks_->connection().connectionInfoProvider().remoteAddress()); if (!handshaker_) { ENVOY_CONN_LOG(warn, "TSI: failed to create handshaker", callbacks_->connection()); callbacks_->connection().close(Network::ConnectionCloseType::NoFlush); diff --git a/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.cc b/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.cc index 4488396f3a513..627d70d12958f 100644 --- a/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.cc +++ b/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.cc @@ -52,8 +52,8 @@ void UpstreamProxyProtocolSocket::generateHeader() { void UpstreamProxyProtocolSocket::generateHeaderV1() { // Default to local addresses. Used if no downstream connection exists or // downstream address info is not set e.g. 
health checks - auto src_addr = callbacks_->connection().addressProvider().localAddress(); - auto dst_addr = callbacks_->connection().addressProvider().remoteAddress(); + auto src_addr = callbacks_->connection().connectionInfoProvider().localAddress(); + auto dst_addr = callbacks_->connection().connectionInfoProvider().remoteAddress(); if (options_ && options_->proxyProtocolOptions().has_value()) { const auto options = options_->proxyProtocolOptions().value(); diff --git a/source/extensions/transport_sockets/tap/tap_config_impl.cc b/source/extensions/transport_sockets/tap/tap_config_impl.cc index 7855df9273ff2..163224e0aefeb 100644 --- a/source/extensions/transport_sockets/tap/tap_config_impl.cc +++ b/source/extensions/transport_sockets/tap/tap_config_impl.cc @@ -29,12 +29,12 @@ PerSocketTapperImpl::PerSocketTapperImpl(SocketTapConfigSharedPtr config, } void PerSocketTapperImpl::fillConnectionInfo(envoy::data::tap::v3::Connection& connection) { - if (connection_.addressProvider().localAddress() != nullptr) { + if (connection_.connectionInfoProvider().localAddress() != nullptr) { // Local address might not be populated before a client connection is connected. 
- Network::Utility::addressToProtobufAddress(*connection_.addressProvider().localAddress(), + Network::Utility::addressToProtobufAddress(*connection_.connectionInfoProvider().localAddress(), *connection.mutable_local_address()); } - Network::Utility::addressToProtobufAddress(*connection_.addressProvider().remoteAddress(), + Network::Utility::addressToProtobufAddress(*connection_.connectionInfoProvider().remoteAddress(), *connection.mutable_remote_address()); } diff --git a/source/extensions/transport_sockets/tls/cert_validator/default_validator.cc b/source/extensions/transport_sockets/tls/cert_validator/default_validator.cc index 0585939c4173b..6cd624f2e38fa 100644 --- a/source/extensions/transport_sockets/tls/cert_validator/default_validator.cc +++ b/source/extensions/transport_sockets/tls/cert_validator/default_validator.cc @@ -228,7 +228,8 @@ int DefaultCertValidator::doVerifyCertChain( Envoy::Ssl::ClientValidationStatus DefaultCertValidator::verifyCertificate( X509* cert, const std::vector& verify_san_list, - const std::vector& subject_alt_name_matchers) { + const std::vector>& + subject_alt_name_matchers) { Envoy::Ssl::ClientValidationStatus validated = Envoy::Ssl::ClientValidationStatus::NotValidated; if (!verify_san_list.empty()) { @@ -307,7 +308,9 @@ bool DefaultCertValidator::dnsNameMatch(const absl::string_view dns_name, } bool DefaultCertValidator::matchSubjectAltName( - X509* cert, const std::vector& subject_alt_name_matchers) { + X509* cert, + const std::vector>& + subject_alt_name_matchers) { bssl::UniquePtr san_names( static_cast(X509_get_ext_d2i(cert, NID_subject_alt_name, nullptr, nullptr))); if (san_names == nullptr) { diff --git a/source/extensions/transport_sockets/tls/cert_validator/default_validator.h b/source/extensions/transport_sockets/tls/cert_validator/default_validator.h index fb7e7acab1f4e..4d5daaf0205e0 100644 --- a/source/extensions/transport_sockets/tls/cert_validator/default_validator.h +++ 
b/source/extensions/transport_sockets/tls/cert_validator/default_validator.h @@ -52,9 +52,10 @@ class DefaultCertValidator : public CertValidator { Envoy::Ssl::CertificateDetailsPtr getCaCertInformation() const override; // Utility functions. - Envoy::Ssl::ClientValidationStatus - verifyCertificate(X509* cert, const std::vector& verify_san_list, - const std::vector& subject_alt_name_matchers); + Envoy::Ssl::ClientValidationStatus verifyCertificate( + X509* cert, const std::vector& verify_san_list, + const std::vector>& + subject_alt_name_matchers); /** * Verifies certificate hash for pinning. The hash is a hex-encoded SHA-256 of the DER-encoded @@ -101,9 +102,10 @@ class DefaultCertValidator : public CertValidator { * @param subject_alt_name_matchers the configured matchers to match * @return true if the verification succeeds */ - static bool - matchSubjectAltName(X509* cert, - const std::vector& subject_alt_name_matchers); + static bool matchSubjectAltName( + X509* cert, + const std::vector>& + subject_alt_name_matchers); private: const Envoy::Ssl::CertificateValidationContextConfig* config_; @@ -113,7 +115,8 @@ class DefaultCertValidator : public CertValidator { bool allow_untrusted_certificate_{false}; bssl::UniquePtr ca_cert_; std::string ca_file_path_; - std::vector subject_alt_name_matchers_; + std::vector> + subject_alt_name_matchers_; std::vector> verify_certificate_hash_list_; std::vector> verify_certificate_spki_list_; bool verify_trusted_ca_{false}; diff --git a/source/extensions/transport_sockets/tls/cert_validator/spiffe/spiffe_validator.cc b/source/extensions/transport_sockets/tls/cert_validator/spiffe/spiffe_validator.cc index bebf2a3067f98..63df8297c76dc 100644 --- a/source/extensions/transport_sockets/tls/cert_validator/spiffe/spiffe_validator.cc +++ b/source/extensions/transport_sockets/tls/cert_validator/spiffe/spiffe_validator.cc @@ -33,7 +33,6 @@ SPIFFEValidator::SPIFFEValidator(const Envoy::Ssl::CertificateValidationContextC SPIFFEConfig 
message; Config::Utility::translateOpaqueConfig(config->customValidatorConfig().value().typed_config(), - ProtobufWkt::Struct(), ProtobufMessage::getStrictValidationVisitor(), message); if (!config->subjectAltNameMatchers().empty()) { diff --git a/source/extensions/transport_sockets/tls/cert_validator/spiffe/spiffe_validator.h b/source/extensions/transport_sockets/tls/cert_validator/spiffe/spiffe_validator.h index fc3a045d02558..b4cc068e908e5 100644 --- a/source/extensions/transport_sockets/tls/cert_validator/spiffe/spiffe_validator.h +++ b/source/extensions/transport_sockets/tls/cert_validator/spiffe/spiffe_validator.h @@ -67,7 +67,8 @@ class SPIFFEValidator : public CertValidator { bool allow_expired_certificate_{false}; std::vector> ca_certs_; std::string ca_file_name_; - std::vector subject_alt_name_matchers_{}; + std::vector> + subject_alt_name_matchers_{}; absl::flat_hash_map trust_bundle_stores_; SslStats& stats_; diff --git a/source/extensions/transport_sockets/tls/context_config_impl.cc b/source/extensions/transport_sockets/tls/context_config_impl.cc index 4dae255fa1c37..4c83c222efdde 100644 --- a/source/extensions/transport_sockets/tls/context_config_impl.cc +++ b/source/extensions/transport_sockets/tls/context_config_impl.cc @@ -181,7 +181,8 @@ ContextConfigImpl::ContextConfigImpl( min_protocol_version_(tlsVersionFromProto(config.tls_params().tls_minimum_protocol_version(), default_min_protocol_version)), max_protocol_version_(tlsVersionFromProto(config.tls_params().tls_maximum_protocol_version(), - default_max_protocol_version)) { + default_max_protocol_version)), + factory_context_(factory_context) { if (certificate_validation_context_provider_ != nullptr) { if (default_cvc_) { // We need to validate combined certificate validation context. 
@@ -212,7 +213,7 @@ ContextConfigImpl::ContextConfigImpl( if (!tls_certificate_providers_.empty()) { for (auto& provider : tls_certificate_providers_) { if (provider->secret() != nullptr) { - tls_certificate_configs_.emplace_back(*provider->secret(), &factory_context, api_); + tls_certificate_configs_.emplace_back(*provider->secret(), factory_context, api_); } } } @@ -257,7 +258,7 @@ void ContextConfigImpl::setSecretUpdateCallback(std::function callback) for (const auto& tls_certificate_provider : tls_certificate_providers_) { auto* secret = tls_certificate_provider->secret(); if (secret != nullptr) { - tls_certificate_configs_.emplace_back(*secret, nullptr, api_); + tls_certificate_configs_.emplace_back(*secret, factory_context_, api_); } } callback(); diff --git a/source/extensions/transport_sockets/tls/context_config_impl.h b/source/extensions/transport_sockets/tls/context_config_impl.h index 7dcbb0ea8681e..691f9148a8bd9 100644 --- a/source/extensions/transport_sockets/tls/context_config_impl.h +++ b/source/extensions/transport_sockets/tls/context_config_impl.h @@ -99,6 +99,7 @@ class ContextConfigImpl : public virtual Ssl::ContextConfig { Ssl::HandshakerFactoryCb handshaker_factory_cb_; Ssl::HandshakerCapabilities capabilities_; Ssl::SslCtxCb sslctx_cb_; + Server::Configuration::TransportSocketFactoryContext& factory_context_; }; class ClientContextConfigImpl : public ContextConfigImpl, public Envoy::Ssl::ClientContextConfig { diff --git a/source/extensions/transport_sockets/tls/context_impl.cc b/source/extensions/transport_sockets/tls/context_impl.cc index abb59d986111b..f8870d4dddc88 100644 --- a/source/extensions/transport_sockets/tls/context_impl.cc +++ b/source/extensions/transport_sockets/tls/context_impl.cc @@ -777,9 +777,7 @@ ServerContextImpl::ServerContextImpl(Stats::Scope& scope, auto& ocsp_resp_bytes = tls_certificates[i].get().ocspStaple(); if (ocsp_resp_bytes.empty()) { - if (Runtime::runtimeFeatureEnabled( - 
"envoy.reloadable_features.require_ocsp_response_for_must_staple_certs") && - ctx.is_must_staple_) { + if (ctx.is_must_staple_) { throw EnvoyException("OCSP response is required for must-staple certificate"); } if (ocsp_staple_policy_ == Ssl::ServerContextConfig::OcspStaplePolicy::MustStaple) { @@ -1047,11 +1045,6 @@ OcspStapleAction ServerContextImpl::ocspStapleAction(const TlsContext& ctx, } auto& response = ctx.ocsp_response_; - if (!Runtime::runtimeFeatureEnabled("envoy.reloadable_features.check_ocsp_policy")) { - // Expiration check is disabled. Proceed as if the policy is LenientStapling and the response - // is not expired. - return response ? OcspStapleAction::Staple : OcspStapleAction::NoStaple; - } auto policy = ocsp_staple_policy_; if (ctx.is_must_staple_) { diff --git a/source/extensions/transport_sockets/tls/io_handle_bio.cc b/source/extensions/transport_sockets/tls/io_handle_bio.cc index 3b595f7062aa2..08ab2655b9a62 100644 --- a/source/extensions/transport_sockets/tls/io_handle_bio.cc +++ b/source/extensions/transport_sockets/tls/io_handle_bio.cc @@ -58,6 +58,8 @@ int io_handle_read(BIO* b, char* out, int outl) { auto err = result.err_->getErrorCode(); if (err == Api::IoError::IoErrorCode::Again || err == Api::IoError::IoErrorCode::Interrupt) { BIO_set_retry_read(b); + } else { + ERR_put_error(ERR_LIB_SYS, 0, result.err_->getSystemErrorCode(), __FILE__, __LINE__); } return -1; } @@ -75,6 +77,8 @@ int io_handle_write(BIO* b, const char* in, int inl) { auto err = result.err_->getErrorCode(); if (err == Api::IoError::IoErrorCode::Again || err == Api::IoError::IoErrorCode::Interrupt) { BIO_set_retry_write(b); + } else { + ERR_put_error(ERR_LIB_SYS, 0, result.err_->getSystemErrorCode(), __FILE__, __LINE__); } return -1; } diff --git a/source/extensions/transport_sockets/tls/ssl_socket.cc b/source/extensions/transport_sockets/tls/ssl_socket.cc index c2eef31132e3d..c5056786ee9a2 100644 --- a/source/extensions/transport_sockets/tls/ssl_socket.cc +++ 
b/source/extensions/transport_sockets/tls/ssl_socket.cc @@ -197,6 +197,11 @@ void SslSocket::drainErrorQueue() { } else if (ERR_GET_REASON(err) == SSL_R_CERTIFICATE_VERIFY_FAILED) { saw_counted_error = true; } + } else if (ERR_GET_LIB(err) == ERR_LIB_SYS) { + // Any syscall errors that result in connection closure are already tracked in other + // connection related stats. We will still retain the specific syscall failure for + // transport failure reasons. + saw_counted_error = true; } saw_error = true; diff --git a/source/extensions/upstreams/http/tcp/upstream_request.cc b/source/extensions/upstreams/http/tcp/upstream_request.cc index 17d291a1a437a..914e346d19031 100644 --- a/source/extensions/upstreams/http/tcp/upstream_request.cc +++ b/source/extensions/upstreams/http/tcp/upstream_request.cc @@ -27,7 +27,8 @@ void TcpConnPool::onPoolReady(Envoy::Tcp::ConnectionPool::ConnectionDataPtr&& co Network::Connection& latched_conn = conn_data->connection(); auto upstream = std::make_unique(&callbacks_->upstreamToDownstream(), std::move(conn_data)); - callbacks_->onPoolReady(std::move(upstream), host, latched_conn.addressProvider().localAddress(), + callbacks_->onPoolReady(std::move(upstream), host, + latched_conn.connectionInfoProvider().localAddress(), latched_conn.streamInfo(), {}); } diff --git a/source/extensions/watchdog/profile_action/config.cc b/source/extensions/watchdog/profile_action/config.cc index ebf4dfa24e08f..cc0bf1a115262 100644 --- a/source/extensions/watchdog/profile_action/config.cc +++ b/source/extensions/watchdog/profile_action/config.cc @@ -15,7 +15,7 @@ Server::Configuration::GuardDogActionPtr ProfileActionFactory::createGuardDogAct const envoy::config::bootstrap::v3::Watchdog::WatchdogAction& config, Server::Configuration::GuardDogActionFactoryContext& context) { auto message = createEmptyConfigProto(); - Config::Utility::translateOpaqueConfig(config.config().typed_config(), ProtobufWkt::Struct(), + 
Config::Utility::translateOpaqueConfig(config.config().typed_config(), ProtobufMessage::getStrictValidationVisitor(), *message); return std::make_unique(dynamic_cast(*message), context); } diff --git a/source/server/BUILD b/source/server/BUILD index 1efb9e42b30f0..b29bb1a84e333 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -157,6 +157,7 @@ envoy_cc_library( name = "active_tcp_listener_headers", hdrs = [ "active_stream_listener_base.h", + "active_tcp_listener.h", "active_tcp_socket.h", ], deps = [ @@ -171,7 +172,10 @@ envoy_cc_library( "//envoy/network:listen_socket_interface", "//envoy/network:listener_interface", "//envoy/server:listener_manager_interface", + "//source/common/common:assert_lib", "//source/common/common:linked_object", + "//source/common/network:connection_lib", + "//source/common/stats:timespan_lib", ], ) @@ -385,7 +389,6 @@ envoy_cc_library( "//source/common/protobuf:utility_lib", "//source/common/stats:stats_lib", "//source/common/version:version_lib", - "@envoy_api//envoy/config/bootstrap/v2:pkg_cc_proto", ], ) @@ -462,7 +465,6 @@ envoy_cc_library( "//source/common/common:basic_resource_lib", "//source/common/common:empty_string", "//source/common/config:utility_lib", - "//source/common/config:version_converter_lib", "//source/common/http:conn_manager_lib", "//source/common/init:manager_lib", "//source/common/init:target_lib", @@ -601,7 +603,6 @@ envoy_cc_library( "//source/server:overload_manager_lib", "//source/server/admin:admin_lib", "@envoy_api//envoy/admin/v3:pkg_cc_proto", - "@envoy_api//envoy/config/bootstrap/v2:pkg_cc_proto", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", ], ) diff --git a/source/server/active_listener_base.h b/source/server/active_listener_base.h index 6981900983c89..22dda88adcf78 100644 --- a/source/server/active_listener_base.h +++ b/source/server/active_listener_base.h @@ -11,6 +11,7 @@ namespace Server { COUNTER(downstream_cx_destroy) \ COUNTER(downstream_cx_overflow) \ 
COUNTER(downstream_cx_total) \ + COUNTER(downstream_cx_transport_socket_connect_timeout) \ COUNTER(downstream_cx_overload_reject) \ COUNTER(downstream_global_cx_overflow) \ COUNTER(downstream_pre_cx_timeout) \ diff --git a/source/server/active_stream_listener_base.cc b/source/server/active_stream_listener_base.cc index 39d336034c5fc..abc5ca7ce4fb7 100644 --- a/source/server/active_stream_listener_base.cc +++ b/source/server/active_stream_listener_base.cc @@ -2,6 +2,8 @@ #include "envoy/network/filter.h" +#include "source/common/stats/timespan_impl.h" + namespace Envoy { namespace Server { @@ -27,9 +29,9 @@ void ActiveStreamListenerBase::newConnection(Network::ConnectionSocketPtr&& sock // Find matching filter chain. const auto filter_chain = config_->filterChainManager().findFilterChain(*socket); if (filter_chain == nullptr) { - RELEASE_ASSERT(socket->addressProvider().remoteAddress() != nullptr, ""); + RELEASE_ASSERT(socket->connectionInfoProvider().remoteAddress() != nullptr, ""); ENVOY_LOG(debug, "closing connection from {}: no matching filter chain found", - socket->addressProvider().remoteAddress()->asString()); + socket->connectionInfoProvider().remoteAddress()->asString()); stats_.no_filter_chain_match_.inc(); stream_info->setResponseFlag(StreamInfo::ResponseFlag::NoRouteFound); stream_info->setResponseCodeDetails(StreamInfo::ResponseCodeDetails::get().FilterChainNotFound); @@ -39,24 +41,125 @@ void ActiveStreamListenerBase::newConnection(Network::ConnectionSocketPtr&& sock } stream_info->setFilterChainName(filter_chain->name()); auto transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr); - stream_info->setDownstreamSslConnection(transport_socket->ssl()); auto server_conn_ptr = dispatcher().createServerConnection( std::move(socket), std::move(transport_socket), *stream_info); if (const auto timeout = filter_chain->transportSocketConnectTimeout(); timeout != std::chrono::milliseconds::zero()) { - 
server_conn_ptr->setTransportSocketConnectTimeout(timeout); + server_conn_ptr->setTransportSocketConnectTimeout( + timeout, stats_.downstream_cx_transport_socket_connect_timeout_); } server_conn_ptr->setBufferLimits(config_->perConnectionBufferLimitBytes()); - RELEASE_ASSERT(server_conn_ptr->addressProvider().remoteAddress() != nullptr, ""); + RELEASE_ASSERT(server_conn_ptr->connectionInfoProvider().remoteAddress() != nullptr, ""); const bool empty_filter_chain = !config_->filterChainFactory().createNetworkFilterChain( *server_conn_ptr, filter_chain->networkFilterFactories()); if (empty_filter_chain) { ENVOY_CONN_LOG(debug, "closing connection from {}: no filters", *server_conn_ptr, - server_conn_ptr->addressProvider().remoteAddress()->asString()); + server_conn_ptr->connectionInfoProvider().remoteAddress()->asString()); server_conn_ptr->close(Network::ConnectionCloseType::NoFlush); } newActiveConnection(*filter_chain, std::move(server_conn_ptr), std::move(stream_info)); } +ActiveConnections::ActiveConnections(OwnedActiveStreamListenerBase& listener, + const Network::FilterChain& filter_chain) + : listener_(listener), filter_chain_(filter_chain) {} + +ActiveConnections::~ActiveConnections() { + // connections should be defer deleted already. + ASSERT(connections_.empty()); +} + +ActiveTcpConnection::ActiveTcpConnection(ActiveConnections& active_connections, + Network::ConnectionPtr&& new_connection, + TimeSource& time_source, + std::unique_ptr&& stream_info) + : stream_info_(std::move(stream_info)), active_connections_(active_connections), + connection_(std::move(new_connection)), + conn_length_(new Stats::HistogramCompletableTimespanImpl( + active_connections_.listener_.stats_.downstream_cx_length_ms_, time_source)) { + // We just universally set no delay on connections. Theoretically we might at some point want + // to make this configurable. 
+ connection_->noDelay(true); + auto& listener = active_connections_.listener_; + listener.stats_.downstream_cx_total_.inc(); + listener.stats_.downstream_cx_active_.inc(); + listener.per_worker_stats_.downstream_cx_total_.inc(); + listener.per_worker_stats_.downstream_cx_active_.inc(); + + // Active connections on the handler (not listener). The per listener connections have already + // been incremented at this point either via the connection balancer or in the socket accept + // path if there is no configured balancer. + listener.parent_.incNumConnections(); +} + +ActiveTcpConnection::~ActiveTcpConnection() { + ActiveStreamListenerBase::emitLogs(*active_connections_.listener_.config_, *stream_info_); + auto& listener = active_connections_.listener_; + listener.stats_.downstream_cx_active_.dec(); + listener.stats_.downstream_cx_destroy_.inc(); + listener.per_worker_stats_.downstream_cx_active_.dec(); + conn_length_->complete(); + + // Active listener connections (not handler). + listener.decNumConnections(); + + // Active handler connections (not listener). + listener.parent_.decNumConnections(); +} + +void ActiveTcpConnection::onEvent(Network::ConnectionEvent event) { + ENVOY_LOG(trace, "[C{}] connection on event {}", connection_->id(), static_cast(event)); + // Any event leads to destruction of the connection. + if (event == Network::ConnectionEvent::LocalClose || + event == Network::ConnectionEvent::RemoteClose) { + active_connections_.listener_.removeConnection(*this); + } +} + +void OwnedActiveStreamListenerBase::removeConnection(ActiveTcpConnection& connection) { + ENVOY_CONN_LOG(debug, "adding to cleanup list", *connection.connection_); + ActiveConnections& active_connections = connection.active_connections_; + ActiveConnectionPtr removed = connection.removeFromList(active_connections.connections_); + dispatcher().deferredDelete(std::move(removed)); + // Delete map entry only iff connections becomes empty. 
+ if (active_connections.connections_.empty()) { + auto iter = connections_by_context_.find(&active_connections.filter_chain_); + ASSERT(iter != connections_by_context_.end()); + // To cover the lifetime of every single connection, Connections need to be deferred deleted + // because the previously contained connection is deferred deleted. + dispatcher().deferredDelete(std::move(iter->second)); + // The erase will break the iteration over the connections_by_context_ during the deletion. + if (!is_deleting_) { + connections_by_context_.erase(iter); + } + } +} + +ActiveConnections& OwnedActiveStreamListenerBase::getOrCreateActiveConnections( + const Network::FilterChain& filter_chain) { + ActiveConnectionCollectionPtr& connections = connections_by_context_[&filter_chain]; + if (connections == nullptr) { + connections = std::make_unique(*this, filter_chain); + } + return *connections; +} + +void OwnedActiveStreamListenerBase::removeFilterChain(const Network::FilterChain* filter_chain) { + auto iter = connections_by_context_.find(filter_chain); + if (iter == connections_by_context_.end()) { + // It is possible when listener is stopping. + } else { + auto& connections = iter->second->connections_; + while (!connections.empty()) { + connections.front()->connection_->close(Network::ConnectionCloseType::NoFlush); + } + // Since is_deleting_ is on, we need to manually remove the map value and drive the + // iterator. Defer delete connection container to avoid race condition in destroying + // connection. 
+ dispatcher().deferredDelete(std::move(iter->second)); + connections_by_context_.erase(iter); + } +} + } // namespace Server } // namespace Envoy diff --git a/source/server/active_stream_listener_base.h b/source/server/active_stream_listener_base.h index 2d99e3965bf69..89ebd2877c659 100644 --- a/source/server/active_stream_listener_base.h +++ b/source/server/active_stream_listener_base.h @@ -10,6 +10,7 @@ #include "envoy/network/connection.h" #include "envoy/network/connection_handler.h" #include "envoy/network/listener.h" +#include "envoy/stats/timespan.h" #include "envoy/stream_info/stream_info.h" #include "source/common/common/linked_object.h" @@ -23,6 +24,7 @@ namespace Server { // After the active socket passes all the listener filters, a server connection is created. The // derived listener must override ``newActiveConnection`` to take the ownership of that server // connection. +// TODO(lambdai): Refactor the listener filter test cases to adopt this class. class ActiveStreamListenerBase : public ActiveListenerImplBase, protected Logger::Loggable { public: @@ -135,5 +137,81 @@ class ActiveStreamListenerBase : public ActiveListenerImplBase, Event::Dispatcher& dispatcher_; }; +struct ActiveTcpConnection; +class OwnedActiveStreamListenerBase; + +/** + * Wrapper for a group of active connections which are attached to the same filter chain context. + */ +class ActiveConnections : public Event::DeferredDeletable { +public: + ActiveConnections(OwnedActiveStreamListenerBase& listener, + const Network::FilterChain& filter_chain); + ~ActiveConnections() override; + + // listener filter chain pair is the owner of the connections + OwnedActiveStreamListenerBase& listener_; + const Network::FilterChain& filter_chain_; + // Owned connections. + std::list> connections_; +}; + +/** + * Wrapper for an active TCP connection owned by this handler. 
+ */ +struct ActiveTcpConnection : LinkedObject, + public Event::DeferredDeletable, + public Network::ConnectionCallbacks, + Logger::Loggable { + ActiveTcpConnection(ActiveConnections& active_connections, + Network::ConnectionPtr&& new_connection, TimeSource& time_system, + std::unique_ptr&& stream_info); + ~ActiveTcpConnection() override; + // Network::ConnectionCallbacks + void onEvent(Network::ConnectionEvent event) override; + void onAboveWriteBufferHighWatermark() override {} + void onBelowWriteBufferLowWatermark() override {} + + std::unique_ptr stream_info_; + ActiveConnections& active_connections_; + Network::ConnectionPtr connection_; + Stats::TimespanPtr conn_length_; +}; + +using ActiveConnectionPtr = std::unique_ptr; +using ActiveConnectionCollectionPtr = std::unique_ptr; + +// The mixin that handles the composition type ActiveConnectionCollection. This mixin +// provides the connection removal helper and the filter chain removal helper. +// All the prod stream listeners should inherit from this class and leave ActiveStreamListenerBase +// for unit test only. +class OwnedActiveStreamListenerBase : public ActiveStreamListenerBase { +public: + OwnedActiveStreamListenerBase(Network::ConnectionHandler& parent, Event::Dispatcher& dispatcher, + Network::ListenerPtr&& listener, Network::ListenerConfig& config) + : ActiveStreamListenerBase(parent, dispatcher, std::move(listener), config) {} + + /** + * Remove and destroy an active connection. + * @param connection supplies the connection to remove. + */ + void removeConnection(ActiveTcpConnection& connection); + +protected: + /** + * Return the active connections container attached to the given filter chain. + */ + ActiveConnections& getOrCreateActiveConnections(const Network::FilterChain& filter_chain); + + /** + * Remove an filter chain. All the active connections that are attached to the filter chain will + * be destroyed. + * @param filter_chain supplies the filter chain to remove. 
+ */ + void removeFilterChain(const Network::FilterChain* filter_chain) override; + + absl::flat_hash_map + connections_by_context_; +}; } // namespace Server } // namespace Envoy diff --git a/source/server/active_tcp_listener.cc b/source/server/active_tcp_listener.cc index cee5494394436..3e0f850c9b7a6 100644 --- a/source/server/active_tcp_listener.cc +++ b/source/server/active_tcp_listener.cc @@ -8,18 +8,17 @@ #include "source/common/common/assert.h" #include "source/common/network/connection_impl.h" #include "source/common/network/utility.h" -#include "source/common/stats/timespan_impl.h" namespace Envoy { namespace Server { ActiveTcpListener::ActiveTcpListener(Network::TcpConnectionHandler& parent, Network::ListenerConfig& config, uint32_t worker_index) - : ActiveStreamListenerBase(parent, parent.dispatcher(), - parent.dispatcher().createListener( - config.listenSocketFactory().getListenSocket(worker_index), - *this, config.bindToPort()), - config), + : OwnedActiveStreamListenerBase(parent, parent.dispatcher(), + parent.dispatcher().createListener( + config.listenSocketFactory().getListenSocket(worker_index), + *this, config.bindToPort()), + config), tcp_conn_handler_(parent) { config.connectionBalancer().registerHandler(*this); } @@ -27,7 +26,7 @@ ActiveTcpListener::ActiveTcpListener(Network::TcpConnectionHandler& parent, ActiveTcpListener::ActiveTcpListener(Network::TcpConnectionHandler& parent, Network::ListenerPtr&& listener, Network::ListenerConfig& config) - : ActiveStreamListenerBase(parent, parent.dispatcher(), std::move(listener), config), + : OwnedActiveStreamListenerBase(parent, parent.dispatcher(), std::move(listener), config), tcp_conn_handler_(parent) { config.connectionBalancer().registerHandler(*this); } @@ -62,53 +61,17 @@ ActiveTcpListener::~ActiveTcpListener() { config_->name(), numConnections())); } -void ActiveTcpListener::removeConnection(ActiveTcpConnection& connection) { - ENVOY_CONN_LOG(debug, "adding to cleanup list", 
*connection.connection_); - ActiveConnections& active_connections = connection.active_connections_; - auto removed = connection.removeFromList(active_connections.connections_); - dispatcher().deferredDelete(std::move(removed)); - // Delete map entry only iff connections becomes empty. - if (active_connections.connections_.empty()) { - auto iter = connections_by_context_.find(&active_connections.filter_chain_); - ASSERT(iter != connections_by_context_.end()); - // To cover the lifetime of every single connection, Connections need to be deferred deleted - // because the previously contained connection is deferred deleted. - dispatcher().deferredDelete(std::move(iter->second)); - // The erase will break the iteration over the connections_by_context_ during the deletion. - if (!is_deleting_) { - connections_by_context_.erase(iter); - } - } -} - void ActiveTcpListener::updateListenerConfig(Network::ListenerConfig& config) { ENVOY_LOG(trace, "replacing listener ", config_->listenerTag(), " by ", config.listenerTag()); ASSERT(&config_->connectionBalancer() == &config.connectionBalancer()); config_ = &config; } -void ActiveTcpListener::removeFilterChain(const Network::FilterChain* filter_chain) { - auto iter = connections_by_context_.find(filter_chain); - if (iter == connections_by_context_.end()) { - // It is possible when listener is stopping. - } else { - auto& connections = iter->second->connections_; - while (!connections.empty()) { - connections.front()->connection_->close(Network::ConnectionCloseType::NoFlush); - } - // Since is_deleting_ is on, we need to manually remove the map value and drive the - // iterator. Defer delete connection container to avoid race condition in destroying - // connection. 
- dispatcher().deferredDelete(std::move(iter->second)); - connections_by_context_.erase(iter); - } -} - void ActiveTcpListener::onAccept(Network::ConnectionSocketPtr&& socket) { if (listenerConnectionLimitReached()) { - RELEASE_ASSERT(socket->addressProvider().remoteAddress() != nullptr, ""); + RELEASE_ASSERT(socket->connectionInfoProvider().remoteAddress() != nullptr, ""); ENVOY_LOG(trace, "closing connection from {}: listener connection limit reached for {}", - socket->addressProvider().remoteAddress()->asString(), config_->name()); + socket->connectionInfoProvider().remoteAddress()->asString(), config_->name()); socket->close(); stats_.downstream_cx_overflow_.inc(); return; @@ -167,27 +130,19 @@ void ActiveTcpListener::newActiveConnection(const Network::FilterChain& filter_c Network::ServerConnectionPtr server_conn_ptr, std::unique_ptr stream_info) { auto& active_connections = getOrCreateActiveConnections(filter_chain); - ActiveTcpConnectionPtr active_connection( - new ActiveTcpConnection(active_connections, std::move(server_conn_ptr), - dispatcher().timeSource(), std::move(stream_info))); + auto active_connection = + std::make_unique(active_connections, std::move(server_conn_ptr), + dispatcher().timeSource(), std::move(stream_info)); // If the connection is already closed, we can just let this connection immediately die. 
if (active_connection->connection_->state() != Network::Connection::State::Closed) { - ENVOY_CONN_LOG(debug, "new connection from {}", *active_connection->connection_, - active_connection->connection_->addressProvider().remoteAddress()->asString()); + ENVOY_CONN_LOG( + debug, "new connection from {}", *active_connection->connection_, + active_connection->connection_->connectionInfoProvider().remoteAddress()->asString()); active_connection->connection_->addConnectionCallbacks(*active_connection); LinkedList::moveIntoList(std::move(active_connection), active_connections.connections_); } } -ActiveConnections& -ActiveTcpListener::getOrCreateActiveConnections(const Network::FilterChain& filter_chain) { - ActiveConnectionCollectionPtr& connections = connections_by_context_[&filter_chain]; - if (connections == nullptr) { - connections = std::make_unique(*this, filter_chain); - } - return *connections; -} - void ActiveTcpListener::post(Network::ConnectionSocketPtr&& socket) { // It is not possible to capture a unique_ptr because the post() API copies the lambda, so we must // bundle the socket inside a shared_ptr that can be captured. @@ -207,61 +162,5 @@ void ActiveTcpListener::post(Network::ConnectionSocketPtr&& socket) { }); } -ActiveConnections::ActiveConnections(ActiveTcpListener& listener, - const Network::FilterChain& filter_chain) - : listener_(listener), filter_chain_(filter_chain) {} - -ActiveConnections::~ActiveConnections() { - // connections should be defer deleted already. 
- ASSERT(connections_.empty()); -} - -ActiveTcpConnection::ActiveTcpConnection(ActiveConnections& active_connections, - Network::ConnectionPtr&& new_connection, - TimeSource& time_source, - std::unique_ptr&& stream_info) - : stream_info_(std::move(stream_info)), active_connections_(active_connections), - connection_(std::move(new_connection)), - conn_length_(new Stats::HistogramCompletableTimespanImpl( - active_connections_.listener_.stats_.downstream_cx_length_ms_, time_source)) { - // We just universally set no delay on connections. Theoretically we might at some point want - // to make this configurable. - connection_->noDelay(true); - auto& listener = active_connections_.listener_; - listener.stats_.downstream_cx_total_.inc(); - listener.stats_.downstream_cx_active_.inc(); - listener.per_worker_stats_.downstream_cx_total_.inc(); - listener.per_worker_stats_.downstream_cx_active_.inc(); - - // Active connections on the handler (not listener). The per listener connections have already - // been incremented at this point either via the connection balancer or in the socket accept - // path if there is no configured balancer. - listener.parent_.incNumConnections(); -} - -ActiveTcpConnection::~ActiveTcpConnection() { - ActiveStreamListenerBase::emitLogs(*active_connections_.listener_.config_, *stream_info_); - auto& listener = active_connections_.listener_; - listener.stats_.downstream_cx_active_.dec(); - listener.stats_.downstream_cx_destroy_.inc(); - listener.per_worker_stats_.downstream_cx_active_.dec(); - conn_length_->complete(); - - // Active listener connections (not handler). - listener.decNumConnections(); - - // Active handler connections (not listener). - listener.parent_.decNumConnections(); -} - -void ActiveTcpConnection::onEvent(Network::ConnectionEvent event) { - ENVOY_LOG(trace, "[C{}] connection on event {}", connection_->id(), static_cast(event)); - // Any event leads to destruction of the connection. 
- if (event == Network::ConnectionEvent::LocalClose || - event == Network::ConnectionEvent::RemoteClose) { - active_connections_.listener_.removeConnection(*this); - } -} - } // namespace Server } // namespace Envoy diff --git a/source/server/active_tcp_listener.h b/source/server/active_tcp_listener.h index 9ea378f445395..00d93e744a8d7 100644 --- a/source/server/active_tcp_listener.h +++ b/source/server/active_tcp_listener.h @@ -1,7 +1,6 @@ #pragma once #include "envoy/event/dispatcher.h" -#include "envoy/stats/timespan.h" #include "envoy/stream_info/stream_info.h" #include "source/common/common/linked_object.h" @@ -11,12 +10,6 @@ namespace Envoy { namespace Server { - -struct ActiveTcpConnection; -using ActiveTcpConnectionPtr = std::unique_ptr; -class ActiveConnections; -using ActiveConnectionCollectionPtr = std::unique_ptr; - namespace { // Structure used to allow a unique_ptr to be captured in a posted lambda. See below. struct RebalancedSocket { @@ -29,7 +22,7 @@ using RebalancedSocketSharedPtr = std::shared_ptr; * Wrapper for an active tcp listener owned by this handler. */ class ActiveTcpListener final : public Network::TcpListenerCallbacks, - public ActiveStreamListenerBase, + public OwnedActiveStreamListenerBase, public Network::BalancedConnectionHandler { public: ActiveTcpListener(Network::TcpConnectionHandler& parent, Network::ListenerConfig& config, @@ -77,28 +70,12 @@ class ActiveTcpListener final : public Network::TcpListenerCallbacks, Network::ServerConnectionPtr server_conn_ptr, std::unique_ptr stream_info) override; - /** - * Return the active connections container attached with the given filter chain. - */ - ActiveConnections& getOrCreateActiveConnections(const Network::FilterChain& filter_chain); - /** * Update the listener config. The follow up connections will see the new config. The existing * connections are not impacted. 
*/ void updateListenerConfig(Network::ListenerConfig& config); - void removeFilterChain(const Network::FilterChain* filter_chain) override; - - /** - * Remove and destroy an active connection. - * @param connection supplies the connection to remove. - */ - void removeConnection(ActiveTcpConnection& connection); - - absl::flat_hash_map> - connections_by_context_; - Network::TcpConnectionHandler& tcp_conn_handler_; // The number of connections currently active on this listener. This is typically used for // connection balancing across per-handler listeners. @@ -106,44 +83,5 @@ class ActiveTcpListener final : public Network::TcpListenerCallbacks, }; using ActiveTcpListenerOptRef = absl::optional>; - -/** - * Wrapper for a group of active connections which are attached to the same filter chain context. - */ -class ActiveConnections : public Event::DeferredDeletable { -public: - ActiveConnections(ActiveTcpListener& listener, const Network::FilterChain& filter_chain); - ~ActiveConnections() override; - - // listener filter chain pair is the owner of the connections - ActiveTcpListener& listener_; - const Network::FilterChain& filter_chain_; - // Owned connections - std::list connections_; -}; - -/** - * Wrapper for an active TCP connection owned by this handler. 
- */ -struct ActiveTcpConnection : LinkedObject, - public Event::DeferredDeletable, - public Network::ConnectionCallbacks, - Logger::Loggable { - ActiveTcpConnection(ActiveConnections& active_connections, - Network::ConnectionPtr&& new_connection, TimeSource& time_system, - std::unique_ptr&& stream_info); - ~ActiveTcpConnection() override; - - // Network::ConnectionCallbacks - void onEvent(Network::ConnectionEvent event) override; - void onAboveWriteBufferHighWatermark() override {} - void onBelowWriteBufferLowWatermark() override {} - - std::unique_ptr stream_info_; - ActiveConnections& active_connections_; - Network::ConnectionPtr connection_; - Stats::TimespanPtr conn_length_; -}; - } // namespace Server } // namespace Envoy diff --git a/source/server/active_tcp_socket.cc b/source/server/active_tcp_socket.cc index e939bffc26128..9e62fe585436a 100644 --- a/source/server/active_tcp_socket.cc +++ b/source/server/active_tcp_socket.cc @@ -15,7 +15,7 @@ ActiveTcpSocket::ActiveTcpSocket(ActiveStreamListenerBase& listener, hand_off_restored_destination_connections_(hand_off_restored_destination_connections), iter_(accept_filters_.end()), stream_info_(std::make_unique( - listener_.dispatcher().timeSource(), socket_->addressProviderSharedPtr(), + listener_.dispatcher().timeSource(), socket_->connectionInfoProviderSharedPtr(), StreamInfo::FilterState::LifeSpan::Connection)) { listener_.stats_.downstream_pre_cx_active_.inc(); } @@ -121,10 +121,10 @@ void ActiveTcpSocket::newConnection() { Network::BalancedConnectionHandlerOptRef new_listener; if (hand_off_restored_destination_connections_ && - socket_->addressProvider().localAddressRestored()) { + socket_->connectionInfoProvider().localAddressRestored()) { // Find a listener associated with the original destination address. 
new_listener = - listener_.getBalancedHandlerByAddress(*socket_->addressProvider().localAddress()); + listener_.getBalancedHandlerByAddress(*socket_->connectionInfoProvider().localAddress()); } if (new_listener.has_value()) { // Hands off connections redirected by iptables to the listener associated with the diff --git a/source/server/admin/admin.cc b/source/server/admin/admin.cc index 314e2c298bf86..74c17040b261f 100644 --- a/source/server/admin/admin.cc +++ b/source/server/admin/admin.cc @@ -127,18 +127,19 @@ void AdminImpl::startHttpListener(const std::list& } null_overload_manager_.start(); socket_ = std::make_shared(address, socket_options, true); - // TODO(mattklein123): We lost error handling along the way for the listen() call. Add it back. - socket_->ioHandle().listen(ENVOY_TCP_BACKLOG_SIZE); + RELEASE_ASSERT(0 == socket_->ioHandle().listen(ENVOY_TCP_BACKLOG_SIZE).return_value_, + "listen() failed on admin listener"); socket_factory_ = std::make_unique(socket_); listener_ = std::make_unique(*this, std::move(listener_scope)); - ENVOY_LOG(info, "admin address: {}", socket().addressProvider().localAddress()->asString()); + ENVOY_LOG(info, "admin address: {}", + socket().connectionInfoProvider().localAddress()->asString()); if (!address_out_path.empty()) { std::ofstream address_out_file(address_out_path); if (!address_out_file) { ENVOY_LOG(critical, "cannot open admin address output file {} for writing.", address_out_path); } else { - address_out_file << socket_->addressProvider().localAddress()->asString(); + address_out_file << socket_->connectionInfoProvider().localAddress()->asString(); } } } diff --git a/source/server/admin/admin.h b/source/server/admin/admin.h index de0cf7a55e2ab..e99330c2c8fc7 100644 --- a/source/server/admin/admin.h +++ b/source/server/admin/admin.h @@ -319,7 +319,7 @@ class AdminImpl : public Admin, // Network::ListenSocketFactory Network::Socket::Type socketType() const override { return socket_->socketType(); } const 
Network::Address::InstanceConstSharedPtr& localAddress() const override { - return socket_->addressProvider().localAddress(); + return socket_->connectionInfoProvider().localAddress(); } Network::SocketSharedPtr getListenSocket(uint32_t) override { // This is only supposed to be called once. diff --git a/source/server/api_listener_impl.cc b/source/server/api_listener_impl.cc index 41f4d34195a64..423db7ffe9c5e 100644 --- a/source/server/api_listener_impl.cc +++ b/source/server/api_listener_impl.cc @@ -13,13 +13,18 @@ namespace Envoy { namespace Server { +bool isQuic(const envoy::config::listener::v3::Listener& config) { + return config.has_udp_listener_config() && config.udp_listener_config().has_quic_options(); +} + ApiListenerImplBase::ApiListenerImplBase(const envoy::config::listener::v3::Listener& config, ListenerManagerImpl& parent, const std::string& name) : config_(config), parent_(parent), name_(name), address_(Network::Address::resolveProtoAddress(config.address())), global_scope_(parent_.server_.stats().createScope("")), listener_scope_(parent_.server_.stats().createScope(fmt::format("listener.api.{}.", name_))), - factory_context_(parent_.server_, config_, *this, *global_scope_, *listener_scope_), + factory_context_(parent_.server_, config_, *this, *global_scope_, *listener_scope_, + isQuic(config)), read_callbacks_(SyntheticReadCallbacks(*this)) {} void ApiListenerImplBase::SyntheticReadCallbacks::SyntheticConnection::raiseConnectionEvent( diff --git a/source/server/api_listener_impl.h b/source/server/api_listener_impl.h index fdb8498ad1f0f..6d2b41bcbd3fc 100644 --- a/source/server/api_listener_impl.h +++ b/source/server/api_listener_impl.h @@ -79,9 +79,10 @@ class ApiListenerImplBase : public ApiListener, class SyntheticConnection : public Network::Connection { public: SyntheticConnection(SyntheticReadCallbacks& parent) - : parent_(parent), address_provider_(std::make_shared( - parent.parent_.address_, parent.parent_.address_)), - 
stream_info_(parent_.parent_.factory_context_.timeSource(), address_provider_), + : parent_(parent), + connection_info_provider_(std::make_shared( + parent.parent_.address_, parent.parent_.address_)), + stream_info_(parent_.parent_.factory_context_.timeSource(), connection_info_provider_), options_(std::make_shared>()) {} void raiseConnectionEvent(Network::ConnectionEvent event); @@ -120,11 +121,11 @@ class ApiListenerImplBase : public ApiListener, void readDisable(bool) override {} void detectEarlyCloseWhenReadDisabled(bool) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } bool readEnabled() const override { return true; } - const Network::SocketAddressSetter& addressProvider() const override { - return *address_provider_; + const Network::ConnectionInfoSetter& connectionInfoProvider() const override { + return *connection_info_provider_; } - Network::SocketAddressProviderSharedPtr addressProviderSharedPtr() const override { - return address_provider_; + Network::ConnectionInfoProviderSharedPtr connectionInfoProviderSharedPtr() const override { + return connection_info_provider_; } absl::optional unixSocketPeerCredentials() const override { @@ -152,7 +153,7 @@ class ApiListenerImplBase : public ApiListener, void dumpState(std::ostream& os, int) const override { os << "SyntheticConnection"; } SyntheticReadCallbacks& parent_; - Network::SocketAddressSetterSharedPtr address_provider_; + Network::ConnectionInfoSetterSharedPtr connection_info_provider_; StreamInfo::StreamInfoImpl stream_info_; Network::ConnectionSocket::OptionsSharedPtr options_; std::list callbacks_; diff --git a/source/server/config_validation/BUILD b/source/server/config_validation/BUILD index f33cd523ce856..bc6fd2391fcd5 100644 --- a/source/server/config_validation/BUILD +++ b/source/server/config_validation/BUILD @@ -26,6 +26,7 @@ envoy_cc_library( "//envoy/api:api_interface", "//envoy/filesystem:filesystem_interface", "//source/common/api:api_lib", + 
"@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", ], ) diff --git a/source/server/config_validation/api.cc b/source/server/config_validation/api.cc index 75f8e755797c2..c9d4a2546d625 100644 --- a/source/server/config_validation/api.cc +++ b/source/server/config_validation/api.cc @@ -8,8 +8,9 @@ namespace Api { ValidationImpl::ValidationImpl(Thread::ThreadFactory& thread_factory, Stats::Store& stats_store, Event::TimeSystem& time_system, Filesystem::Instance& file_system, - Random::RandomGenerator& random_generator) - : Impl(thread_factory, stats_store, time_system, file_system, random_generator), + Random::RandomGenerator& random_generator, + const envoy::config::bootstrap::v3::Bootstrap& bootstrap) + : Impl(thread_factory, stats_store, time_system, file_system, random_generator, bootstrap), time_system_(time_system) {} Event::DispatcherPtr ValidationImpl::allocateDispatcher(const std::string& name) { diff --git a/source/server/config_validation/api.h b/source/server/config_validation/api.h index 6725362caa733..c1a64a65df54a 100644 --- a/source/server/config_validation/api.h +++ b/source/server/config_validation/api.h @@ -1,6 +1,7 @@ #pragma once #include "envoy/api/api.h" +#include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/event/timer.h" #include "envoy/filesystem/filesystem.h" @@ -17,7 +18,8 @@ class ValidationImpl : public Impl { public: ValidationImpl(Thread::ThreadFactory& thread_factory, Stats::Store& stats_store, Event::TimeSystem& time_system, Filesystem::Instance& file_system, - Random::RandomGenerator& random_generator); + Random::RandomGenerator& random_generator, + const envoy::config::bootstrap::v3::Bootstrap& bootstrap); Event::DispatcherPtr allocateDispatcher(const std::string& name) override; Event::DispatcherPtr allocateDispatcher(const std::string& name, diff --git a/source/server/config_validation/server.cc b/source/server/config_validation/server.cc index 3d37c7ac56c49..6a0039c7f189e 100644 --- 
a/source/server/config_validation/server.cc +++ b/source/server/config_validation/server.cc @@ -45,8 +45,9 @@ ValidationInstance::ValidationInstance( : options_(options), validation_context_(options_.allowUnknownStaticFields(), !options.rejectUnknownDynamicFields(), !options.ignoreUnknownDynamicFields()), - stats_store_(store), api_(new Api::ValidationImpl(thread_factory, store, time_system, - file_system, random_generator_)), + stats_store_(store), + api_(new Api::ValidationImpl(thread_factory, store, time_system, file_system, + random_generator_, bootstrap_)), dispatcher_(api_->allocateDispatcher("main_thread")), singleton_manager_(new Singleton::ManagerImpl(api_->threadFactory())), access_log_manager_(options.fileFlushIntervalMsec(), *api_, *dispatcher_, access_log_lock, @@ -78,22 +79,21 @@ void ValidationInstance::initialize(const Options& options, // If we get all the way through that stripped-down initialization flow, to the point where we'd // be ready to serve, then the config has passed validation. // Handle configuration that needs to take place prior to the main configuration load. 
- envoy::config::bootstrap::v3::Bootstrap bootstrap; - InstanceUtil::loadBootstrapConfig(bootstrap, options, + InstanceUtil::loadBootstrapConfig(bootstrap_, options, messageValidationContext().staticValidationVisitor(), *api_); - Config::Utility::createTagProducer(bootstrap); - bootstrap.mutable_node()->set_hidden_envoy_deprecated_build_version(VersionInfo::version()); + Config::Utility::createTagProducer(bootstrap_); + bootstrap_.mutable_node()->set_hidden_envoy_deprecated_build_version(VersionInfo::version()); local_info_ = std::make_unique( - stats().symbolTable(), bootstrap.node(), bootstrap.node_context_params(), local_address, + stats().symbolTable(), bootstrap_.node(), bootstrap_.node_context_params(), local_address, options.serviceZone(), options.serviceClusterName(), options.serviceNodeName()); overload_manager_ = std::make_unique( - dispatcher(), stats(), threadLocal(), bootstrap.overload_manager(), + dispatcher(), stats(), threadLocal(), bootstrap_.overload_manager(), messageValidationContext().staticValidationVisitor(), *api_, options_); - Configuration::InitialImpl initial_config(bootstrap, options); - initial_config.initAdminAccessLog(bootstrap, *this); + Configuration::InitialImpl initial_config(bootstrap_); + initial_config.initAdminAccessLog(bootstrap_, *this); admin_ = std::make_unique(initial_config.admin().address()); listener_manager_ = std::make_unique(*this, *this, *this, false, quic_stat_names_); @@ -107,7 +107,7 @@ void ValidationInstance::initialize(const Options& options, localInfo(), *secret_manager_, messageValidationContext(), *api_, http_context_, grpc_context_, router_context_, accessLogManager(), singletonManager(), options, quic_stat_names_); - config_.initialize(bootstrap, *this, *cluster_manager_factory_); + config_.initialize(bootstrap_, *this, *cluster_manager_factory_); runtime().initialize(clusterManager()); clusterManager().setInitializedCb([this]() -> void { init_manager_.initialize(init_watcher_); }); } diff --git 
a/source/server/config_validation/server.h b/source/server/config_validation/server.h index 356769f2b5962..4509400fcc90c 100644 --- a/source/server/config_validation/server.h +++ b/source/server/config_validation/server.h @@ -2,6 +2,7 @@ #include +#include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/core/v3/config_source.pb.h" #include "envoy/config/listener/v3/listener.pb.h" #include "envoy/config/listener/v3/listener_components.pb.h" @@ -112,7 +113,7 @@ class ValidationInstance final : Logger::Loggable, bool enableReusePortDefault() override { return true; } Configuration::StatsConfig& statsConfig() override { return config_.statsConfig(); } - envoy::config::bootstrap::v3::Bootstrap& bootstrap() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } + envoy::config::bootstrap::v3::Bootstrap& bootstrap() override { return bootstrap_; } Configuration::ServerFactoryContext& serverFactoryContext() override { return server_contexts_; } Configuration::TransportSocketFactoryContext& transportSocketFactoryContext() override { return server_contexts_; @@ -195,6 +196,7 @@ class ValidationInstance final : Logger::Loggable, ProtobufMessage::ProdValidationContextImpl validation_context_; Stats::IsolatedStoreImpl& stats_store_; ThreadLocal::InstanceImpl thread_local_; + envoy::config::bootstrap::v3::Bootstrap bootstrap_; Api::ApiPtr api_; Event::DispatcherPtr dispatcher_; std::unique_ptr admin_; diff --git a/source/server/configuration_impl.cc b/source/server/configuration_impl.cc index 3322819a08fd3..a4556846df540 100644 --- a/source/server/configuration_impl.cc +++ b/source/server/configuration_impl.cc @@ -192,9 +192,7 @@ WatchdogImpl::WatchdogImpl(const envoy::config::bootstrap::v3::Watchdog& watchdo actions_ = watchdog.actions(); } -InitialImpl::InitialImpl(const envoy::config::bootstrap::v3::Bootstrap& bootstrap, - const Options& options) - : enable_deprecated_v2_api_(options.bootstrapVersion() == 2u) { +InitialImpl::InitialImpl(const 
envoy::config::bootstrap::v3::Bootstrap& bootstrap) { const auto& admin = bootstrap.admin(); admin_.profile_path_ = @@ -219,16 +217,6 @@ InitialImpl::InitialImpl(const envoy::config::bootstrap::v3::Bootstrap& bootstra layered_runtime_.add_layers()->mutable_admin_layer(); } } - if (enable_deprecated_v2_api_) { - auto* enabled_deprecated_v2_api_layer = layered_runtime_.add_layers(); - enabled_deprecated_v2_api_layer->set_name("enabled_deprecated_v2_api (auto-injected)"); - auto* static_layer = enabled_deprecated_v2_api_layer->mutable_static_layer(); - ProtobufWkt::Value val; - val.set_bool_value(true); - (*static_layer - ->mutable_fields())["envoy.test_only.broken_in_production.enable_deprecated_v2_api"] = - val; - } } void InitialImpl::initAdminAccessLog(const envoy::config::bootstrap::v3::Bootstrap& bootstrap, diff --git a/source/server/configuration_impl.h b/source/server/configuration_impl.h index b833c264bc535..7efa5e22fda54 100644 --- a/source/server/configuration_impl.h +++ b/source/server/configuration_impl.h @@ -170,7 +170,7 @@ class WatchdogImpl : public Watchdog { */ class InitialImpl : public Initial { public: - InitialImpl(const envoy::config::bootstrap::v3::Bootstrap& bootstrap, const Options& options); + InitialImpl(const envoy::config::bootstrap::v3::Bootstrap& bootstrap); // Server::Configuration::Initial Admin& admin() override { return admin_; } @@ -199,7 +199,6 @@ class InitialImpl : public Initial { Network::Socket::OptionsSharedPtr socket_options_; }; - const bool enable_deprecated_v2_api_; AdminImpl admin_; absl::optional flags_path_; envoy::config::bootstrap::v3::LayeredRuntime layered_runtime_; diff --git a/source/server/filter_chain_manager_impl.cc b/source/server/filter_chain_manager_impl.cc index 095d16eb6b505..01e63097f2c63 100644 --- a/source/server/filter_chain_manager_impl.cc +++ b/source/server/filter_chain_manager_impl.cc @@ -31,7 +31,9 @@ Network::Address::InstanceConstSharedPtr fakeAddress() { 
PerFilterChainFactoryContextImpl::PerFilterChainFactoryContextImpl( Configuration::FactoryContext& parent_context, Init::Manager& init_manager) - : parent_context_(parent_context), init_manager_(init_manager) {} + : parent_context_(parent_context), scope_(parent_context_.scope().createScope("")), + filter_chain_scope_(parent_context_.listenerScope().createScope("")), + init_manager_(init_manager) {} bool PerFilterChainFactoryContextImpl::drainClose() const { return is_draining_.load() || parent_context_.drainDecision().drainClose(); @@ -101,7 +103,7 @@ Envoy::Runtime::Loader& PerFilterChainFactoryContextImpl::runtime() { return parent_context_.runtime(); } -Stats::Scope& PerFilterChainFactoryContextImpl::scope() { return parent_context_.scope(); } +Stats::Scope& PerFilterChainFactoryContextImpl::scope() { return *scope_; } Singleton::Manager& PerFilterChainFactoryContextImpl::singletonManager() { return parent_context_.singletonManager(); @@ -135,8 +137,10 @@ PerFilterChainFactoryContextImpl::getTransportSocketFactoryContext() const { return parent_context_.getTransportSocketFactoryContext(); } -Stats::Scope& PerFilterChainFactoryContextImpl::listenerScope() { - return parent_context_.listenerScope(); +Stats::Scope& PerFilterChainFactoryContextImpl::listenerScope() { return *filter_chain_scope_; } + +bool PerFilterChainFactoryContextImpl::isQuicListener() const { + return parent_context_.isQuicListener(); } FilterChainManagerImpl::FilterChainManagerImpl( @@ -459,7 +463,7 @@ std::pair> makeCidrListEntry(const s const Network::FilterChain* FilterChainManagerImpl::findFilterChain(const Network::ConnectionSocket& socket) const { - const auto& address = socket.addressProvider().localAddress(); + const auto& address = socket.connectionInfoProvider().localAddress(); const Network::FilterChain* best_match_filter_chain = nullptr; // Match on destination port (only for IP addresses). 
@@ -489,7 +493,7 @@ FilterChainManagerImpl::findFilterChain(const Network::ConnectionSocket& socket) const Network::FilterChain* FilterChainManagerImpl::findFilterChainForDestinationIP( const DestinationIPsTrie& destination_ips_trie, const Network::ConnectionSocket& socket) const { - auto address = socket.addressProvider().localAddress(); + auto address = socket.connectionInfoProvider().localAddress(); if (address->type() != Network::Address::Type::Ip) { address = fakeAddress(); } @@ -578,7 +582,7 @@ const Network::FilterChain* FilterChainManagerImpl::findFilterChainForApplicatio const Network::FilterChain* FilterChainManagerImpl::findFilterChainForDirectSourceIP( const DirectSourceIPsTrie& direct_source_ips_trie, const Network::ConnectionSocket& socket) const { - auto address = socket.addressProvider().directRemoteAddress(); + auto address = socket.connectionInfoProvider().directRemoteAddress(); if (address->type() != Network::Address::Type::Ip) { address = fakeAddress(); } @@ -628,7 +632,7 @@ const Network::FilterChain* FilterChainManagerImpl::findFilterChainForSourceType const Network::FilterChain* FilterChainManagerImpl::findFilterChainForSourceIpAndPort( const SourceIPsTrie& source_ips_trie, const Network::ConnectionSocket& socket) const { - auto address = socket.addressProvider().remoteAddress(); + auto address = socket.connectionInfoProvider().remoteAddress(); if (address->type() != Network::Address::Type::Ip) { address = fakeAddress(); } @@ -742,9 +746,10 @@ Configuration::FilterChainFactoryContextPtr FilterChainManagerImpl::createFilter FactoryContextImpl::FactoryContextImpl(Server::Instance& server, const envoy::config::listener::v3::Listener& config, Network::DrainDecision& drain_decision, - Stats::Scope& global_scope, Stats::Scope& listener_scope) + Stats::Scope& global_scope, Stats::Scope& listener_scope, + bool is_quic) : server_(server), config_(config), drain_decision_(drain_decision), - global_scope_(global_scope), listener_scope_(listener_scope) 
{} + global_scope_(global_scope), listener_scope_(listener_scope), is_quic_(is_quic) {} AccessLog::AccessLogManager& FactoryContextImpl::accessLogManager() { return server_.accessLogManager(); } @@ -791,5 +796,6 @@ envoy::config::core::v3::TrafficDirection FactoryContextImpl::direction() const } Network::DrainDecision& FactoryContextImpl::drainDecision() { return drain_decision_; } Stats::Scope& FactoryContextImpl::listenerScope() { return listener_scope_; } +bool FactoryContextImpl::isQuicListener() const { return is_quic_; } } // namespace Server } // namespace Envoy diff --git a/source/server/filter_chain_manager_impl.h b/source/server/filter_chain_manager_impl.h index 4e6caf625dbf9..3919aeb81073d 100644 --- a/source/server/filter_chain_manager_impl.h +++ b/source/server/filter_chain_manager_impl.h @@ -81,11 +81,16 @@ class PerFilterChainFactoryContextImpl : public Configuration::FilterChainFactor Configuration::ServerFactoryContext& getServerFactoryContext() const override; Configuration::TransportSocketFactoryContext& getTransportSocketFactoryContext() const override; Stats::Scope& listenerScope() override; + bool isQuicListener() const override; void startDraining() override { is_draining_.store(true); } private: Configuration::FactoryContext& parent_context_; + // The scope that has an empty prefix. + Stats::ScopePtr scope_; + // filter_chain_scope_ has the same prefix as the listener owner's scope.
+ Stats::ScopePtr filter_chain_scope_; Init::Manager& init_manager_; std::atomic is_draining_{false}; }; @@ -135,7 +140,7 @@ class FactoryContextImpl : public Configuration::FactoryContext { public: FactoryContextImpl(Server::Instance& server, const envoy::config::listener::v3::Listener& config, Network::DrainDecision& drain_decision, Stats::Scope& global_scope, - Stats::Scope& listener_scope); + Stats::Scope& listener_scope, bool is_quic); // Configuration::FactoryContext AccessLog::AccessLogManager& accessLogManager() override; @@ -166,6 +171,7 @@ class FactoryContextImpl : public Configuration::FactoryContext { envoy::config::core::v3::TrafficDirection direction() const override; Network::DrainDecision& drainDecision() override; Stats::Scope& listenerScope() override; + bool isQuicListener() const override; private: Server::Instance& server_; @@ -173,6 +179,7 @@ class FactoryContextImpl : public Configuration::FactoryContext { Network::DrainDecision& drain_decision_; Stats::Scope& global_scope_; Stats::Scope& listener_scope_; + bool is_quic_; }; /** diff --git a/source/server/lds_api.cc b/source/server/lds_api.cc index 1a1f9f9aed988..349251d870a96 100644 --- a/source/server/lds_api.cc +++ b/source/server/lds_api.cc @@ -24,8 +24,8 @@ LdsApiImpl::LdsApiImpl(const envoy::config::core::v3::ConfigSource& lds_config, Upstream::ClusterManager& cm, Init::Manager& init_manager, Stats::Scope& scope, ListenerManager& lm, ProtobufMessage::ValidationVisitor& validation_visitor) - : Envoy::Config::SubscriptionBase( - lds_config.resource_api_version(), validation_visitor, "name"), + : Envoy::Config::SubscriptionBase(validation_visitor, + "name"), listener_manager_(lm), scope_(scope.createScope("listener_manager.lds.")), cm_(cm), init_target_("LDS", [this]() { subscription_->start({}); }) { const auto resource_name = getResourceName(); @@ -44,9 +44,8 @@ void LdsApiImpl::onConfigUpdate(const std::vector& a const std::string& system_version_info) { Config::ScopedResume 
maybe_resume_rds; if (cm_.adsMux()) { - const auto type_urls = - Config::getAllVersionTypeUrls(); - maybe_resume_rds = cm_.adsMux()->pause(type_urls); + const auto type_url = Config::getTypeUrl(); + maybe_resume_rds = cm_.adsMux()->pause(type_url); } bool any_applied = false; diff --git a/source/server/listener_impl.cc b/source/server/listener_impl.cc index 44feb19180a99..c6a666477ce35 100644 --- a/source/server/listener_impl.cc +++ b/source/server/listener_impl.cc @@ -48,11 +48,18 @@ bool anyFilterChain( } bool usesProxyProto(const envoy::config::listener::v3::Listener& config) { - // TODO(#14085): `use_proxy_proto` should be deprecated. // Checking only the first or default filter chain is done for backwards compatibility. - return PROTOBUF_GET_WRAPPED_OR_DEFAULT( + const bool use_proxy_proto = PROTOBUF_GET_WRAPPED_OR_DEFAULT( config.filter_chains().empty() ? config.default_filter_chain() : config.filter_chains()[0], use_proxy_proto, false); + if (use_proxy_proto) { + ENVOY_LOG_MISC(warn, + "using deprecated field 'use_proxy_proto' is dangerous as it does not respect " + "listener filter order. 
Do not use this field and instead configure the proxy " + "proto listener filter directly."); + } + + return use_proxy_proto; } bool shouldBindToPort(const envoy::config::listener::v3::Listener& config) { @@ -86,7 +93,7 @@ ListenSocketFactoryImpl::ListenSocketFactoryImpl( sockets_.push_back(createListenSocketAndApplyOptions(factory, socket_type, 0)); if (sockets_[0] != nullptr && local_address_->ip() && local_address_->ip()->port() == 0) { - local_address_ = sockets_[0]->addressProvider().localAddress(); + local_address_ = sockets_[0]->connectionInfoProvider().localAddress(); } ENVOY_LOG(debug, "Set listener {} socket factory local address to {}", listener_name, local_address_->asString()); @@ -168,19 +175,33 @@ void ListenSocketFactoryImpl::doFinalPreWorkerInit() { return; } - for (auto& socket : sockets_) { - // TODO(mattklein123): At some point we lost error handling on this call which I think can - // technically fail (at least according to lingering code comments). Add error handling on this - // in a follow up. - socket->ioHandle().listen(tcp_backlog_size_); - + ASSERT(!sockets_.empty()); + auto listen_and_apply_options = [](Envoy::Network::SocketSharedPtr socket, int tcp_backlog_size) { + const auto rc = socket->ioHandle().listen(tcp_backlog_size); + if (rc.return_value_ != 0) { + throw EnvoyException(fmt::format("cannot listen() errno={}", rc.errno_)); + } if (!Network::Socket::applyOptions(socket->options(), *socket, envoy::config::core::v3::SocketOption::STATE_LISTENING)) { throw Network::SocketOptionException( fmt::format("cannot set post-listen socket option on socket: {}", - socket->addressProvider().localAddress()->asString())); + socket->connectionInfoProvider().localAddress()->asString())); } + }; + // On all platforms we should listen on the first socket. 
+ auto iterator = sockets_.begin(); + listen_and_apply_options(*iterator, tcp_backlog_size_); + ++iterator; +#ifndef WIN32 + // With this implementation on Windows we only accept + // connections on Worker 1 and then we use the `ExactConnectionBalancer` + // to balance these connections to all workers. + // TODO(davinci26): We should update the behavior when socket duplication + // does not cause accepts to hang in the OS. + for (; iterator != sockets_.end(); ++iterator) { + listen_and_apply_options(*iterator, tcp_backlog_size_); } +#endif } ListenerFactoryContextBaseImpl::ListenerFactoryContextBaseImpl( @@ -193,7 +214,8 @@ ListenerFactoryContextBaseImpl::ListenerFactoryContextBaseImpl( !config.stat_prefix().empty() ? config.stat_prefix() : Network::Address::resolveProtoAddress(config.address())->asString()))), - validation_visitor_(validation_visitor), drain_manager_(std::move(drain_manager)) {} + validation_visitor_(validation_visitor), drain_manager_(std::move(drain_manager)), + is_quic_(config.udp_listener_config().has_quic_options()) {} AccessLog::AccessLogManager& ListenerFactoryContextBaseImpl::accessLogManager() { return server_.accessLogManager(); @@ -251,6 +273,7 @@ ListenerFactoryContextBaseImpl::getTransportSocketFactoryContext() const { return server_.transportSocketFactoryContext(); } Stats::Scope& ListenerFactoryContextBaseImpl::listenerScope() { return *listener_scope_; } +bool ListenerFactoryContextBaseImpl::isQuicListener() const { return is_quic_; } Network::DrainDecision& ListenerFactoryContextBaseImpl::drainDecision() { return *this; } Server::DrainManager& ListenerFactoryContextBaseImpl::drainManager() { return *drain_manager_; } @@ -303,6 +326,13 @@ ListenerImpl::ListenerImpl(const envoy::config::listener::v3::Listener& config, listener_init_target_.ready(); } }), + transport_factory_context_( + std::make_shared( + parent_.server_.admin(), parent_.server_.sslContextManager(), listenerScope(), + parent_.server_.clusterManager(), 
parent_.server_.localInfo(), + parent_.server_.dispatcher(), parent_.server_.stats(), + parent_.server_.singletonManager(), parent_.server_.threadLocal(), + validation_visitor_, parent_.server_.api(), parent_.server_.options())), quic_stat_names_(parent_.quicStatNames()) { const absl::optional runtime_val = @@ -373,6 +403,7 @@ ListenerImpl::ListenerImpl(ListenerImpl& origin, ASSERT(workers_started_); parent_.inPlaceFilterChainUpdate(*this); }), + transport_factory_context_(origin.transport_factory_context_), quic_stat_names_(parent_.quicStatNames()) { buildAccessLog(); auto socket_type = Network::Utility::protobufAddressSocketType(config.address()); @@ -413,6 +444,10 @@ void ListenerImpl::buildUdpListenerFactory(Network::Socket::Type socket_type, udp_listener_config_ = std::make_unique(config_.udp_listener_config()); if (config_.udp_listener_config().has_quic_options()) { #ifdef ENVOY_ENABLE_QUIC + if (config_.has_connection_balance_config()) { + throw EnvoyException("connection_balance_config is configured for QUIC listener which " + "doesn't work with connection balancer."); + } udp_listener_config_->listener_factory_ = std::make_unique( config_.udp_listener_config().quic_options(), concurrency, quic_stat_names_); #if UDP_GSO_BATCH_WRITER_COMPILETIME_SUPPORT @@ -521,13 +556,8 @@ void ListenerImpl::validateFilterChains(Network::Socket::Type socket_type) { } void ListenerImpl::buildFilterChains() { - Server::Configuration::TransportSocketFactoryContextImpl transport_factory_context( - parent_.server_.admin(), parent_.server_.sslContextManager(), listenerScope(), - parent_.server_.clusterManager(), parent_.server_.localInfo(), parent_.server_.dispatcher(), - parent_.server_.stats(), parent_.server_.singletonManager(), parent_.server_.threadLocal(), - validation_visitor_, parent_.server_.api(), parent_.server_.options()); - transport_factory_context.setInitManager(*dynamic_init_manager_); - ListenerFilterChainFactoryBuilder builder(*this, transport_factory_context); 
+ transport_factory_context_->setInitManager(*dynamic_init_manager_); + ListenerFilterChainFactoryBuilder builder(*this, *transport_factory_context_); filter_chain_manager_.addFilterChains( config_.filter_chains(), config_.has_default_filter_chain() ? &config_.default_filter_chain() : nullptr, builder, @@ -537,6 +567,19 @@ void ListenerImpl::buildFilterChains() { void ListenerImpl::buildSocketOptions() { // TCP specific setup. if (connection_balancer_ == nullptr) { +#ifdef WIN32 + // On Windows we use the exact connection balancer to dispatch connections + // from worker 1 to all workers. This is a perf hit but it is the only way + // to make all the workers do work. + // TODO(davinci26): We can be faster here if we create a balancer implementation + // that dispatches the connection to a random thread. + ENVOY_LOG(warn, + "ExactBalance was forced enabled for TCP listener '{}' because " + "Envoy is running on Windows." + "ExactBalance is used to load balance connections between workers on Windows.", + config_.name()); + connection_balancer_ = std::make_shared(); +#else // Not in place listener update. if (config_.has_connection_balance_config()) { // Currently exact balance is the only supported type and there are no options. 
@@ -545,6 +588,7 @@ void ListenerImpl::buildSocketOptions() { } else { connection_balancer_ = std::make_shared(); } +#endif } if (config_.has_tcp_fast_open_queue_length()) { @@ -661,6 +705,9 @@ PerListenerFactoryContextImpl::getTransportSocketFactoryContext() const { Stats::Scope& PerListenerFactoryContextImpl::listenerScope() { return listener_factory_context_base_->listenerScope(); } +bool PerListenerFactoryContextImpl::isQuicListener() const { + return listener_factory_context_base_->isQuicListener(); +} Init::Manager& PerListenerFactoryContextImpl::initManager() { return listener_impl_.initManager(); } bool ListenerImpl::createNetworkFilterChain( diff --git a/source/server/listener_impl.h b/source/server/listener_impl.h index dfc04ef5b5a3e..0114ff9c9e33e 100644 --- a/source/server/listener_impl.h +++ b/source/server/listener_impl.h @@ -19,6 +19,7 @@ #include "source/common/init/target_impl.h" #include "source/common/quic/quic_stat_names.h" #include "source/server/filter_chain_manager_impl.h" +#include "source/server/transport_socket_config_impl.h" namespace Envoy { namespace Server { @@ -129,6 +130,7 @@ class ListenerFactoryContextBaseImpl final : public Configuration::FactoryContex Configuration::ServerFactoryContext& getServerFactoryContext() const override; Configuration::TransportSocketFactoryContext& getTransportSocketFactoryContext() const override; Stats::Scope& listenerScope() override; + bool isQuicListener() const override; // DrainDecision bool drainClose() const override { @@ -148,6 +150,7 @@ class ListenerFactoryContextBaseImpl final : public Configuration::FactoryContex Stats::ScopePtr listener_scope_; // Stats with listener named scope. 
ProtobufMessage::ValidationVisitor& validation_visitor_; const Server::DrainManagerPtr drain_manager_; + bool is_quic_; }; class ListenerImpl; @@ -201,6 +204,7 @@ class PerListenerFactoryContextImpl : public Configuration::ListenerFactoryConte Configuration::TransportSocketFactoryContext& getTransportSocketFactoryContext() const override; Stats::Scope& listenerScope() override; + bool isQuicListener() const override; // ListenerFactoryContext const Network::ListenerConfig& listenerConfig() const override; @@ -426,6 +430,8 @@ class ListenerImpl final : public Network::ListenerConfig, // Important: local_init_watcher_ must be the last field in the class to avoid unexpected watcher // callback during the destroy of ListenerImpl. Init::WatcherImpl local_init_watcher_; + std::shared_ptr + transport_factory_context_; Quic::QuicStatNames& quic_stat_names_; diff --git a/source/server/listener_manager_impl.cc b/source/server/listener_manager_impl.cc index 5fd3db577d6b8..afd6ebd142d6d 100644 --- a/source/server/listener_manager_impl.cc +++ b/source/server/listener_manager_impl.cc @@ -16,7 +16,6 @@ #include "source/common/common/assert.h" #include "source/common/common/fmt.h" #include "source/common/config/utility.h" -#include "source/common/config/version_converter.h" #include "source/common/network/filter_matcher.h" #include "source/common/network/io_socket_handle_impl.h" #include "source/common/network/listen_socket_impl.h" @@ -72,7 +71,7 @@ envoy::admin::v3::ListenersConfigDump::DynamicListener* getOrCreateDynamicListen void fillState(envoy::admin::v3::ListenersConfigDump::DynamicListenerState& state, const ListenerImpl& listener) { state.set_version_info(listener.versionInfo()); - state.mutable_listener()->PackFrom(API_RECOVER_ORIGINAL(listener.config())); + state.mutable_listener()->PackFrom(listener.config()); TimestampUtil::systemClockToTimestamp(listener.last_updated_, *(state.mutable_last_updated())); } } // namespace @@ -87,10 +86,7 @@ std::vector 
ProdListenerComponentFactory::createNetwor ENVOY_LOG(debug, " name: {}", proto_config.name()); ENVOY_LOG(debug, " config: {}", MessageUtil::getJsonStringFromMessageOrError( - proto_config.has_typed_config() - ? static_cast(proto_config.typed_config()) - : static_cast( - proto_config.hidden_envoy_deprecated_config()))); + static_cast(proto_config.typed_config()))); // Now see if there is a factory that will accept the config. auto& factory = @@ -121,10 +117,7 @@ ProdListenerComponentFactory::createListenerFilterFactoryList_( ENVOY_LOG(debug, " name: {}", proto_config.name()); ENVOY_LOG(debug, " config: {}", MessageUtil::getJsonStringFromMessageOrError( - proto_config.has_typed_config() - ? static_cast(proto_config.typed_config()) - : static_cast( - proto_config.hidden_envoy_deprecated_config()))); + static_cast(proto_config.typed_config()))); // Now see if there is a factory that will accept the config. auto& factory = @@ -149,10 +142,7 @@ ProdListenerComponentFactory::createUdpListenerFilterFactoryList_( ENVOY_LOG(debug, " name: {}", proto_config.name()); ENVOY_LOG(debug, " config: {}", MessageUtil::getJsonStringFromMessageOrError( - proto_config.has_typed_config() - ? static_cast(proto_config.typed_config()) - : static_cast( - proto_config.hidden_envoy_deprecated_config()))); + static_cast(proto_config.typed_config()))); // Now see if there is a factory that will accept the config. 
auto& factory = @@ -274,7 +264,7 @@ ListenerManagerImpl::dumpListenerConfigs(const Matchers::StringMatcher& name_mat } if (listener->blockRemove()) { auto& static_listener = *config_dump->mutable_static_listeners()->Add(); - static_listener.mutable_listener()->PackFrom(API_RECOVER_ORIGINAL(listener->config())); + static_listener.mutable_listener()->PackFrom(listener->config()); TimestampUtil::systemClockToTimestamp(listener->last_updated_, *(static_listener.mutable_last_updated())); continue; @@ -378,7 +368,7 @@ bool ListenerManagerImpl::addOrUpdateListener(const envoy::config::listener::v3: TimestampUtil::systemClockToTimestamp(server_.api().timeSource().systemTime(), *(it->second->mutable_last_update_attempt())); it->second->set_details(e.what()); - it->second->mutable_failed_configuration()->PackFrom(API_RECOVER_ORIGINAL(config)); + it->second->mutable_failed_configuration()->PackFrom(config); throw e; } error_state_tracker_.erase(it); @@ -930,6 +920,15 @@ Network::DrainableFilterChainSharedPtr ListenerFilterChainFactoryBuilder::buildF "{}. \nUse QuicDownstreamTransport instead.", transport_socket.DebugString())); } + const std::string hcm_str = + "type.googleapis.com/" + "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager"; + if (is_quic && (filter_chain.filters().size() != 1 || + filter_chain.filters(0).typed_config().type_url() != hcm_str)) { + throw EnvoyException(fmt::format( + "error building network filter chain for quic listener: requires exactly one http_" + "connection_manager filter.")); + } #else // When QUIC is compiled out it should not be possible to configure either the QUIC transport // socket or the QUIC listener and get to this point. 
diff --git a/source/server/options_impl.cc b/source/server/options_impl.cc index 57becc5a89e4a..7ef794c2098fb 100644 --- a/source/server/options_impl.cc +++ b/source/server/options_impl.cc @@ -71,11 +71,6 @@ OptionsImpl::OptionsImpl(std::vector args, TCLAP::ValueArg config_yaml( "", "config-yaml", "Inline YAML configuration, merges with the contents of --config-path", false, "", "string", cmd); - TCLAP::ValueArg bootstrap_version( - "", "bootstrap-version", - "API version to parse the bootstrap config as (e.g. 3). If " - "unset, all known versions will be attempted", - false, 0, "string", cmd); TCLAP::SwitchArg allow_unknown_fields("", "allow-unknown-fields", "allow unknown fields in static configuration (DEPRECATED)", @@ -241,9 +236,6 @@ OptionsImpl::OptionsImpl(std::vector args, config_path_ = config_path.getValue(); config_yaml_ = config_yaml.getValue(); - if (bootstrap_version.getValue() != 0) { - bootstrap_version_ = bootstrap_version.getValue(); - } if (allow_unknown_fields.getValue()) { ENVOY_LOG(warn, "--allow-unknown-fields is deprecated, use --allow-unknown-static-fields instead."); @@ -347,7 +339,7 @@ void OptionsImpl::parseComponentLogLevels(const std::string& component_log_level uint32_t OptionsImpl::count() const { return count_; } -void OptionsImpl::logError(const std::string& error) const { throw MalformedArgvException(error); } +void OptionsImpl::logError(const std::string& error) { throw MalformedArgvException(error); } Server::CommandLineOptionsPtr OptionsImpl::toCommandLineOptions() const { Server::CommandLineOptionsPtr command_line_options = diff --git a/source/server/options_impl.h b/source/server/options_impl.h index 9efd569cf2bdf..697dee2440f15 100644 --- a/source/server/options_impl.h +++ b/source/server/options_impl.h @@ -44,7 +44,8 @@ class OptionsImpl : public Server::Options, protected Logger::Loggable args, const HotRestartVersionCb& hot_restart_version_cb, spdlog::level::level_enum default_log_level); - // Test constructor; creates 
"reasonable" defaults, but desired values should be set explicitly. + // Default constructor; creates "reasonable" defaults, but desired values should be set + // explicitly. OptionsImpl(const std::string& service_cluster, const std::string& service_node, const std::string& service_zone, spdlog::level::level_enum log_level); @@ -58,7 +59,6 @@ class OptionsImpl : public Server::Options, protected Logger::Loggable& bootstrapVersion() const override { return bootstrap_version_; } const std::string& configYaml() const override { return config_yaml_; } bool allowUnknownStaticFields() const override { return allow_unknown_static_fields_; } bool rejectUnknownDynamicFields() const override { return reject_unknown_dynamic_fields_; } @@ -165,9 +164,15 @@ class OptionsImpl : public Server::Options, protected Logger::Loggable&); static std::string allowedLogLevels(); + /** + * Parses and validates the provided log_level, returning the corresponding + * spdlog::level::level_enum. + * @throws MalformedArgvException if the provided string is not a valid spdlog string. 
+ */ + static spdlog::level::level_enum parseAndValidateLogLevel(absl::string_view log_level); + private: - void logError(const std::string& error) const; - spdlog::level::level_enum parseAndValidateLogLevel(absl::string_view log_level); + static void logError(const std::string& error); uint64_t base_id_{0}; bool use_dynamic_base_id_{false}; @@ -175,7 +180,6 @@ class OptionsImpl : public Server::Options, protected Logger::Loggable bootstrap_version_; std::string config_yaml_; bool allow_unknown_static_fields_{false}; bool reject_unknown_dynamic_fields_{false}; diff --git a/source/server/overload_manager_impl.cc b/source/server/overload_manager_impl.cc index ee6e479a6d409..1e999e718ce94 100644 --- a/source/server/overload_manager_impl.cc +++ b/source/server/overload_manager_impl.cc @@ -106,12 +106,15 @@ class ScaledTriggerImpl final : public OverloadAction::Trigger { OverloadActionState state_; }; -Stats::Counter& makeCounter(Stats::Scope& scope, absl::string_view a, absl::string_view b) { - Stats::StatNameManagedStorage stat_name(absl::StrCat("overload.", a, ".", b), - scope.symbolTable()); +Stats::Counter& makeCounter(Stats::Scope& scope, absl::string_view name_of_stat) { + Stats::StatNameManagedStorage stat_name(name_of_stat, scope.symbolTable()); return scope.counterFromStatName(stat_name.statName()); } +Stats::Counter& makeCounter(Stats::Scope& scope, absl::string_view a, absl::string_view b) { + return makeCounter(scope, absl::StrCat("overload.", a, ".", b)); +} + Stats::Gauge& makeGauge(Stats::Scope& scope, absl::string_view a, absl::string_view b, Stats::Gauge::ImportMode import_mode) { Stats::StatNameManagedStorage stat_name(absl::StrCat("overload.", a, ".", b), @@ -299,6 +302,12 @@ OverloadManagerImpl::OverloadManagerImpl(Event::Dispatcher& dispatcher, Stats::S if (name == OverloadActionNames::get().ReduceTimeouts) { timer_minimums_ = std::make_shared( parseTimerMinimums(action.typed_config(), validation_visitor)); + } else if (name == 
OverloadActionNames::get().ResetStreams) { + if (!config.has_buffer_factory_config()) { + throw EnvoyException( + fmt::format("Overload action \"{}\" requires buffer_factory_config.", name)); + } + makeCounter(api.rootScope(), OverloadActionStatsNames::get().ResetStreamsCount); } else if (action.has_typed_config()) { throw EnvoyException(fmt::format( "Overload action \"{}\" has an unexpected value for the typed_config field", name)); diff --git a/source/server/proto_descriptors.cc b/source/server/proto_descriptors.cc index d7043ff18feb1..9638b84ba4f3b 100644 --- a/source/server/proto_descriptors.cc +++ b/source/server/proto_descriptors.cc @@ -12,25 +12,25 @@ namespace Server { void validateProtoDescriptors() { const auto methods = { - "envoy.api.v2.ClusterDiscoveryService.FetchClusters", - "envoy.api.v2.ClusterDiscoveryService.StreamClusters", - "envoy.api.v2.ClusterDiscoveryService.DeltaClusters", - "envoy.api.v2.EndpointDiscoveryService.FetchEndpoints", - "envoy.api.v2.EndpointDiscoveryService.StreamEndpoints", - "envoy.api.v2.EndpointDiscoveryService.DeltaEndpoints", - "envoy.api.v2.ListenerDiscoveryService.FetchListeners", - "envoy.api.v2.ListenerDiscoveryService.StreamListeners", - "envoy.api.v2.ListenerDiscoveryService.DeltaListeners", - "envoy.api.v2.RouteDiscoveryService.FetchRoutes", - "envoy.api.v2.RouteDiscoveryService.StreamRoutes", - "envoy.api.v2.RouteDiscoveryService.DeltaRoutes", - "envoy.service.discovery.v2.AggregatedDiscoveryService.StreamAggregatedResources", - "envoy.service.discovery.v2.AggregatedDiscoveryService.DeltaAggregatedResources", - "envoy.service.discovery.v2.HealthDiscoveryService.FetchHealthCheck", - "envoy.service.discovery.v2.HealthDiscoveryService.StreamHealthCheck", - "envoy.service.discovery.v2.RuntimeDiscoveryService.FetchRuntime", - "envoy.service.discovery.v2.RuntimeDiscoveryService.StreamRuntime", - "envoy.service.ratelimit.v2.RateLimitService.ShouldRateLimit", + 
"envoy.service.cluster.v3.ClusterDiscoveryService.FetchClusters", + "envoy.service.cluster.v3.ClusterDiscoveryService.StreamClusters", + "envoy.service.cluster.v3.ClusterDiscoveryService.DeltaClusters", + "envoy.service.discovery.v3.AggregatedDiscoveryService.StreamAggregatedResources", + "envoy.service.discovery.v3.AggregatedDiscoveryService.DeltaAggregatedResources", + "envoy.service.endpoint.v3.EndpointDiscoveryService.FetchEndpoints", + "envoy.service.endpoint.v3.EndpointDiscoveryService.StreamEndpoints", + "envoy.service.endpoint.v3.EndpointDiscoveryService.DeltaEndpoints", + "envoy.service.health.v3.HealthDiscoveryService.FetchHealthCheck", + "envoy.service.health.v3.HealthDiscoveryService.StreamHealthCheck", + "envoy.service.listener.v3.ListenerDiscoveryService.FetchListeners", + "envoy.service.listener.v3.ListenerDiscoveryService.StreamListeners", + "envoy.service.listener.v3.ListenerDiscoveryService.DeltaListeners", + "envoy.service.ratelimit.v3.RateLimitService.ShouldRateLimit", + "envoy.service.route.v3.RouteDiscoveryService.FetchRoutes", + "envoy.service.route.v3.RouteDiscoveryService.StreamRoutes", + "envoy.service.route.v3.RouteDiscoveryService.DeltaRoutes", + "envoy.service.runtime.v3.RuntimeDiscoveryService.StreamRuntime", + "envoy.service.runtime.v3.RuntimeDiscoveryService.FetchRuntime", }; for (const auto& method : methods) { @@ -39,14 +39,15 @@ void validateProtoDescriptors() { } const auto types = { - "envoy.api.v2.Cluster", "envoy.api.v2.ClusterLoadAssignment", - "envoy.api.v2.Listener", "envoy.api.v2.RouteConfiguration", - "envoy.api.v2.route.VirtualHost", "envoy.api.v2.auth.Secret", + "envoy.config.cluster.v3.Cluster", "envoy.config.endpoint.v3.ClusterLoadAssignment", + "envoy.config.listener.v3.Listener", "envoy.config.route.v3.RouteConfiguration", + "envoy.config.route.v3.VirtualHost", "envoy.extensions.transport_sockets.tls.v3.Secret", }; for (const auto& type : types) { - RELEASE_ASSERT( - 
Protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(type) != nullptr, ""); + RELEASE_ASSERT(Protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(type) != + nullptr, + absl::StrCat("Unable to find message type for ", type)); } } diff --git a/source/server/server.cc b/source/server/server.cc index edb9c03045503..152058301be54 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -10,8 +10,6 @@ #include "envoy/admin/v3/config_dump.pb.h" #include "envoy/common/exception.h" #include "envoy/common/time.h" -#include "envoy/config/bootstrap/v2/bootstrap.pb.h" -#include "envoy/config/bootstrap/v2/bootstrap.pb.validate.h" #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/bootstrap/v3/bootstrap.pb.validate.h" #include "envoy/event/dispatcher.h" @@ -22,6 +20,7 @@ #include "envoy/server/bootstrap_extension_config.h" #include "envoy/server/instance.h" #include "envoy/server/options.h" +#include "envoy/stats/stats.h" #include "envoy/upstream/cluster_manager.h" #include "source/common/api/api_impl.h" @@ -32,7 +31,6 @@ #include "source/common/config/grpc_mux_impl.h" #include "source/common/config/new_grpc_mux_impl.h" #include "source/common/config/utility.h" -#include "source/common/config/version_converter.h" #include "source/common/config/xds_resource.h" #include "source/common/http/codes.h" #include "source/common/http/headers.h" @@ -76,10 +74,10 @@ InstanceImpl::InstanceImpl( time_source_(time_system), restarter_(restarter), start_time_(time(nullptr)), original_start_time_(start_time_), stats_store_(store), thread_local_(tls), random_generator_(std::move(random_generator)), - api_(new Api::Impl(thread_factory, store, time_system, file_system, *random_generator_, - process_context ? ProcessContextOptRef(std::ref(*process_context)) - : absl::nullopt, - watermark_factory)), + api_(new Api::Impl( + thread_factory, store, time_system, file_system, *random_generator_, bootstrap_, + process_context ? 
ProcessContextOptRef(std::ref(*process_context)) : absl::nullopt, + watermark_factory)), dispatcher_(api_->allocateDispatcher("main_thread")), singleton_manager_(new Singleton::ManagerImpl(api_->threadFactory())), handler_(new ConnectionHandlerImpl(*dispatcher_, absl::nullopt)), @@ -168,18 +166,16 @@ void InstanceImpl::failHealthcheck(bool fail) { } MetricSnapshotImpl::MetricSnapshotImpl(Stats::Store& store, TimeSource& time_source) { - snapped_counters_ = store.counters(); - counters_.reserve(snapped_counters_.size()); - for (const auto& counter : snapped_counters_) { - counters_.push_back({counter->latch(), *counter}); - } + store.forEachCounter([this](std::size_t size) mutable { counters_.reserve(size); }, + [this](Stats::Counter& counter) mutable { + counters_.push_back({counter.latch(), counter}); + }); - snapped_gauges_ = store.gauges(); - gauges_.reserve(snapped_gauges_.size()); - for (const auto& gauge : snapped_gauges_) { - ASSERT(gauge->importMode() != Stats::Gauge::ImportMode::Uninitialized); - gauges_.push_back(*gauge); - } + store.forEachGauge([this](std::size_t size) mutable { gauges_.reserve(size); }, + [this](Stats::Gauge& gauge) mutable { + ASSERT(gauge.importMode() != Stats::Gauge::ImportMode::Uninitialized); + gauges_.push_back(gauge); + }); snapped_histograms_ = store.histograms(); histograms_.reserve(snapped_histograms_.size()); @@ -187,11 +183,9 @@ MetricSnapshotImpl::MetricSnapshotImpl(Stats::Store& store, TimeSource& time_sou histograms_.push_back(*histogram); } - snapped_text_readouts_ = store.textReadouts(); - text_readouts_.reserve(snapped_text_readouts_.size()); - for (const auto& text_readout : snapped_text_readouts_) { - text_readouts_.push_back(*text_readout); - } + store.forEachTextReadout( + [this](std::size_t size) mutable { text_readouts_.reserve(size); }, + [this](Stats::TextReadout& text_readout) { text_readouts_.push_back(text_readout); }); snapshot_time_ = time_source.systemTime(); } @@ -281,21 +275,6 @@ ProcessContextOptRef 
InstanceImpl::processContext() { } namespace { -// Loads a bootstrap object, potentially at a specific version (upgrading if necessary). -void loadBootstrap(absl::optional bootstrap_version, - envoy::config::bootstrap::v3::Bootstrap& bootstrap, - std::function load_function) { - - if (!bootstrap_version.has_value()) { - load_function(bootstrap, true); - } else if (*bootstrap_version == 3) { - load_function(bootstrap, false); - } else if (*bootstrap_version == 2) { - throw EnvoyException("v2 bootstrap is deprecated and no longer supported."); - } else { - throw EnvoyException(fmt::format("Unknown bootstrap version {}.", *bootstrap_version)); - } -} bool canBeRegisteredAsInlineHeader(const Http::LowerCaseString& header_name) { // 'set-cookie' cannot currently be registered as an inline header. @@ -353,19 +332,11 @@ void InstanceUtil::loadBootstrapConfig(envoy::config::bootstrap::v3::Bootstrap& } if (!config_path.empty()) { - loadBootstrap( - options.bootstrapVersion(), bootstrap, - [&config_path, &validation_visitor, &api](Protobuf::Message& message, bool do_boosting) { - MessageUtil::loadFromFile(config_path, message, validation_visitor, api, do_boosting); - }); + MessageUtil::loadFromFile(config_path, bootstrap, validation_visitor, api); } if (!config_yaml.empty()) { envoy::config::bootstrap::v3::Bootstrap bootstrap_override; - loadBootstrap( - options.bootstrapVersion(), bootstrap_override, - [&config_yaml, &validation_visitor](Protobuf::Message& message, bool do_boosting) { - MessageUtil::loadFromYaml(config_yaml, message, validation_visitor, do_boosting); - }); + MessageUtil::loadFromYaml(config_yaml, bootstrap_override, validation_visitor); // TODO(snowp): The fact that we do a merge here doesn't seem to be covered under test. 
bootstrap.MergeFrom(bootstrap_override); } @@ -464,15 +435,6 @@ void InstanceImpl::initialize(const Options& options, bootstrap_.mutable_node()->set_user_agent_name("envoy"); } - // If user has set user_agent_version in the bootstrap config, use it. - // Default to the internal server version. - if (!bootstrap_.node().user_agent_version().empty()) { - std::string user_agent_version = bootstrap_.node().user_agent_version(); - bootstrap_.mutable_node()->set_hidden_envoy_deprecated_build_version(user_agent_version); - } else { - bootstrap_.mutable_node()->set_hidden_envoy_deprecated_build_version(VersionInfo::version()); - } - // If user has set user_agent_build_version in the bootstrap config, use it. // Default to the internal server version. if (!bootstrap_.node().user_agent_build_version().has_version()) { @@ -496,7 +458,7 @@ void InstanceImpl::initialize(const Options& options, stats().symbolTable(), bootstrap_.node(), bootstrap_.node_context_params(), local_address, options.serviceZone(), options.serviceClusterName(), options.serviceNodeName()); - Configuration::InitialImpl initial_config(bootstrap_, options); + Configuration::InitialImpl initial_config(bootstrap_); // Learn original_start_time_ if our parent is still around to inform us of it. 
const auto parent_admin_shutdown_response = restarter_.sendParentAdminShutdownRequest(); @@ -712,14 +674,14 @@ void InstanceImpl::onRuntimeReady() { async_client_manager_ = std::make_unique( *config_.clusterManager(), thread_local_, time_source_, *api_, grpc_context_.statNames()); TRY_ASSERT_MAIN_THREAD { + Config::Utility::checkTransportVersion(hds_config); hds_delegate_ = std::make_unique( stats_store_, Config::Utility::factoryForGrpcApiConfigSource(*async_client_manager_, hds_config, stats_store_, false) ->createUncachedRawAsyncClient(), - Config::Utility::getAndCheckTransportVersion(hds_config), *dispatcher_, - Runtime::LoaderSingleton::get(), stats_store_, *ssl_context_manager_, info_factory_, - access_log_manager_, *config_.clusterManager(), *local_info_, *admin_, + *dispatcher_, Runtime::LoaderSingleton::get(), stats_store_, *ssl_context_manager_, + info_factory_, access_log_manager_, *config_.clusterManager(), *local_info_, *admin_, *singleton_manager_, thread_local_, messageValidationContext().dynamicValidationVisitor(), *api_, options_); } @@ -829,14 +791,13 @@ RunHelper::RunHelper(Instance& instance, const Options& options, Event::Dispatch return; } - const auto type_urls = - Config::getAllVersionTypeUrls(); + const auto type_url = Config::getTypeUrl(); // Pause RDS to ensure that we don't send any requests until we've // subscribed to all the RDS resources. The subscriptions happen in the init callbacks, // so we pause RDS until we've completed all the callbacks. Config::ScopedResume maybe_resume_rds; if (cm.adsMux()) { - maybe_resume_rds = cm.adsMux()->pause(type_urls); + maybe_resume_rds = cm.adsMux()->pause(type_url); } ENVOY_LOG(info, "all clusters initialized. 
initializing init manager"); diff --git a/source/server/server.h b/source/server/server.h index 36a017416317c..9234f6c2ae7b7 100644 --- a/source/server/server.h +++ b/source/server/server.h @@ -345,6 +345,7 @@ class InstanceImpl final : Logger::Loggable, Assert::ActionRegistrationPtr envoy_bug_action_registration_; ThreadLocal::Instance& thread_local_; Random::RandomGeneratorPtr random_generator_; + envoy::config::bootstrap::v3::Bootstrap bootstrap_; Api::ApiPtr api_; Event::DispatcherPtr dispatcher_; std::unique_ptr admin_; @@ -367,7 +368,6 @@ class InstanceImpl final : Logger::Loggable, std::unique_ptr worker_guard_dog_; bool terminated_; std::unique_ptr file_logger_; - envoy::config::bootstrap::v3::Bootstrap bootstrap_; ConfigTracker::EntryOwnerPtr config_tracker_entry_; SystemTime bootstrap_config_update_time_; Grpc::AsyncClientManagerPtr async_client_manager_; diff --git a/source/server/worker_impl.cc b/source/server/worker_impl.cc index 55fc17a281490..3e0fdbc93e2bc 100644 --- a/source/server/worker_impl.cc +++ b/source/server/worker_impl.cc @@ -20,14 +20,16 @@ WorkerPtr ProdWorkerFactory::createWorker(uint32_t index, OverloadManager& overl api_.allocateDispatcher(worker_name, overload_manager.scaledTimerFactory())); auto conn_handler = std::make_unique(*dispatcher, index); return std::make_unique(tls_, hooks_, std::move(dispatcher), std::move(conn_handler), - overload_manager, api_); + overload_manager, api_, stat_names_); } WorkerImpl::WorkerImpl(ThreadLocal::Instance& tls, ListenerHooks& hooks, Event::DispatcherPtr&& dispatcher, Network::ConnectionHandlerPtr handler, - OverloadManager& overload_manager, Api::Api& api) + OverloadManager& overload_manager, Api::Api& api, + WorkerStatNames& stat_names) : tls_(tls), hooks_(hooks), dispatcher_(std::move(dispatcher)), handler_(std::move(handler)), - api_(api) { + api_(api), reset_streams_counter_( + api_.rootScope().counterFromStatName(stat_names.reset_high_memory_stream_)) { tls_.registerThread(*dispatcher_, 
false); overload_manager.registerForAction( OverloadActionNames::get().StopAcceptingConnections, *dispatcher_, @@ -35,6 +37,9 @@ WorkerImpl::WorkerImpl(ThreadLocal::Instance& tls, ListenerHooks& hooks, overload_manager.registerForAction( OverloadActionNames::get().RejectIncomingConnections, *dispatcher_, [this](OverloadActionState state) { rejectIncomingConnectionsCb(state); }); + overload_manager.registerForAction( + OverloadActionNames::get().ResetStreams, *dispatcher_, + [this](OverloadActionState state) { resetStreamsUsingExcessiveMemory(state); }); } void WorkerImpl::addListener(absl::optional overridden_listener, @@ -148,5 +153,11 @@ void WorkerImpl::rejectIncomingConnectionsCb(OverloadActionState state) { handler_->setListenerRejectFraction(state.value()); } +void WorkerImpl::resetStreamsUsingExcessiveMemory(OverloadActionState state) { + uint64_t streams_reset_count = + dispatcher_->getWatermarkFactory().resetAccountsGivenPressure(state.value().value()); + reset_streams_counter_.add(streams_reset_count); +} + } // namespace Server } // namespace Envoy diff --git a/source/server/worker_impl.h b/source/server/worker_impl.h index c5187c6716de3..87419667eed87 100644 --- a/source/server/worker_impl.h +++ b/source/server/worker_impl.h @@ -16,10 +16,20 @@ namespace Envoy { namespace Server { +// Captures a set of stat names for the workers. 
+struct WorkerStatNames { + explicit WorkerStatNames(Stats::SymbolTable& symbol_table) + : pool_(symbol_table), + reset_high_memory_stream_(pool_.add(OverloadActionStatsNames::get().ResetStreamsCount)) {} + + Stats::StatNamePool pool_; + Stats::StatName reset_high_memory_stream_; +}; + class ProdWorkerFactory : public WorkerFactory, Logger::Loggable { public: ProdWorkerFactory(ThreadLocal::Instance& tls, Api::Api& api, ListenerHooks& hooks) - : tls_(tls), api_(api), hooks_(hooks) {} + : tls_(tls), api_(api), stat_names_(api.rootScope().symbolTable()), hooks_(hooks) {} // Server::WorkerFactory WorkerPtr createWorker(uint32_t index, OverloadManager& overload_manager, @@ -28,6 +38,7 @@ class ProdWorkerFactory : public WorkerFactory, Logger::Loggable { public: WorkerImpl(ThreadLocal::Instance& tls, ListenerHooks& hooks, Event::DispatcherPtr&& dispatcher, Network::ConnectionHandlerPtr handler, OverloadManager& overload_manager, - Api::Api& api); + Api::Api& api, WorkerStatNames& stat_names); // Server::Worker void addListener(absl::optional overridden_listener, Network::ListenerConfig& listener, @@ -58,12 +69,14 @@ class WorkerImpl : public Worker, Logger::Loggable { void threadRoutine(GuardDog& guard_dog, const Event::PostCb& cb); void stopAcceptingConnectionsCb(OverloadActionState state); void rejectIncomingConnectionsCb(OverloadActionState state); + void resetStreamsUsingExcessiveMemory(OverloadActionState state); ThreadLocal::Instance& tls_; ListenerHooks& hooks_; Event::DispatcherPtr dispatcher_; Network::ConnectionHandlerPtr handler_; Api::Api& api_; + Stats::Counter& reset_streams_counter_; Thread::ThreadPtr thread_; WatchDogSharedPtr watch_dog_; }; diff --git a/test/README.md b/test/README.md index 85f12625902f1..a2c9298b857b0 100644 --- a/test/README.md +++ b/test/README.md @@ -67,7 +67,7 @@ instances of Protobuf::RepeatedPtrField element-by-element. 
Example: ```cpp -envoy::api::v2::DeltaDiscoveryRequest expected_request; +envoy::service::discovery::v3::DeltaDiscoveryRequest expected_request; // (not shown: set some fields of expected_request...) EXPECT_CALL(async_stream_, sendMessage(ProtoEqIgnoringField(expected_request, "response_nonce"), false)); diff --git a/test/common/access_log/access_log_impl_test.cc b/test/common/access_log/access_log_impl_test.cc index b52e2d038057a..126088c418d3f 100644 --- a/test/common/access_log/access_log_impl_test.cc +++ b/test/common/access_log/access_log_impl_test.cc @@ -39,10 +39,9 @@ namespace Envoy { namespace AccessLog { namespace { -envoy::config::accesslog::v3::AccessLog parseAccessLogFromV3Yaml(const std::string& yaml, - bool avoid_boosting = true) { +envoy::config::accesslog::v3::AccessLog parseAccessLogFromV3Yaml(const std::string& yaml) { envoy::config::accesslog::v3::AccessLog access_log; - TestUtility::loadFromYamlAndValidate(yaml, access_log, false, avoid_boosting); + TestUtility::loadFromYamlAndValidate(yaml, access_log); return access_log; } diff --git a/test/common/buffer/BUILD b/test/common/buffer/BUILD index bd01534ca6cae..43cfdcf596084 100644 --- a/test/common/buffer/BUILD +++ b/test/common/buffer/BUILD @@ -83,6 +83,18 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "buffer_memory_account_test", + srcs = ["buffer_memory_account_test.cc"], + deps = [ + "//source/common/buffer:buffer_lib", + "//test/integration:tracked_watermark_buffer_lib", + "//test/mocks/buffer:buffer_mocks", + "//test/mocks/http:stream_reset_handler_mock", + "@envoy_api//envoy/config/overload/v3:pkg_cc_proto", + ], +) + envoy_cc_test( name = "zero_copy_input_stream_test", srcs = ["zero_copy_input_stream_test.cc"], diff --git a/test/common/buffer/buffer_memory_account_test.cc b/test/common/buffer/buffer_memory_account_test.cc new file mode 100644 index 0000000000000..a651eca14dffa --- /dev/null +++ b/test/common/buffer/buffer_memory_account_test.cc @@ -0,0 +1,578 @@ +#include + +#include 
"envoy/config/overload/v3/overload.pb.h" +#include "envoy/http/codec.h" + +#include "source/common/buffer/buffer_impl.h" + +#include "test/integration/tracked_watermark_buffer.h" +#include "test/mocks/http/stream_reset_handler.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Buffer { +namespace { + +using testing::_; + +using MemoryClassesToAccountsSet = std::array, + BufferMemoryAccountImpl::NUM_MEMORY_CLASSES_>; + +constexpr uint64_t kMinimumBalanceToTrack = 1024 * 1024; +constexpr uint64_t kThresholdForFinalBucket = 128 * 1024 * 1024; + +// Gets the balance of an account assuming it's a BufferMemoryAccountImpl. +static int getBalance(const BufferMemoryAccountSharedPtr& account) { + return static_cast(account.get())->balance(); +} + +// Check the memory_classes_to_account is empty. +static void noAccountsTracked(MemoryClassesToAccountsSet& memory_classes_to_account) { + for (const auto& set : memory_classes_to_account) { + EXPECT_TRUE(set.empty()); + } +} + +class BufferMemoryAccountTest : public testing::Test { +protected: + TrackedWatermarkBufferFactory factory_{absl::bit_width(kMinimumBalanceToTrack)}; + Http::MockStreamResetHandler mock_reset_handler_; +}; + +TEST_F(BufferMemoryAccountTest, ManagesAccountBalance) { + auto account = factory_.createAccount(mock_reset_handler_); + Buffer::OwnedImpl buffer(account); + ASSERT_EQ(getBalance(account), 0); + + // Check the balance increases as expected. + { + // New slice created + buffer.add("Hello"); + EXPECT_EQ(getBalance(account), 4096); + + // Should just be added to existing slice. + buffer.add(" World!"); + EXPECT_EQ(getBalance(account), 4096); + + // Trigger new slice creation with add. + const std::string long_string(4096, 'a'); + buffer.add(long_string); + EXPECT_EQ(getBalance(account), 8192); + + // AppendForTest also adds new slice. 
+ buffer.appendSliceForTest("Extra Slice"); + EXPECT_EQ(getBalance(account), 12288); + } + + // Check the balance drains as slices are consumed. + { + // Shouldn't trigger slice free yet + buffer.drain(4095); + EXPECT_EQ(getBalance(account), 12288); + + // Trigger slice reclaim. + buffer.drain(1); + EXPECT_EQ(getBalance(account), 8192); + + // Reclaim next slice + buffer.drain(std::string("Hello World!").length()); + EXPECT_EQ(getBalance(account), 4096); + + // Reclaim remaining + buffer.drain(std::string("Extra Slice").length()); + EXPECT_EQ(getBalance(account), 0); + } + + account->clearDownstream(); +} + +TEST_F(BufferMemoryAccountTest, BufferAccountsForUnownedSliceMovedInto) { + auto account = factory_.createAccount(mock_reset_handler_); + Buffer::OwnedImpl accounted_buffer(account); + + Buffer::OwnedImpl unowned_buffer; + unowned_buffer.add("Unaccounted Slice"); + ASSERT_EQ(getBalance(account), 0); + + // Transfer over buffer + accounted_buffer.move(unowned_buffer); + EXPECT_EQ(getBalance(account), 4096); + + accounted_buffer.drain(accounted_buffer.length()); + EXPECT_EQ(getBalance(account), 0); + + account->clearDownstream(); +} + +TEST_F(BufferMemoryAccountTest, BufferFragmentsShouldNotHaveAnAssociatedAccount) { + auto buffer_one_account = factory_.createAccount(mock_reset_handler_); + Buffer::OwnedImpl buffer_one(buffer_one_account); + ASSERT_EQ(getBalance(buffer_one_account), 0); + + auto buffer_two_account = factory_.createAccount(mock_reset_handler_); + Buffer::OwnedImpl buffer_two(buffer_two_account); + ASSERT_EQ(getBalance(buffer_two_account), 0); + + const char data[] = "hello world"; + BufferFragmentImpl frag(data, 11, nullptr); + buffer_one.addBufferFragment(frag); + EXPECT_EQ(getBalance(buffer_one_account), 0); + EXPECT_EQ(buffer_one.length(), 11); + + // Transfer over buffer + buffer_two.move(buffer_one); + EXPECT_EQ(getBalance(buffer_two_account), 0); + EXPECT_EQ(buffer_two.length(), 11); + + buffer_two.drain(buffer_two.length()); + 
EXPECT_EQ(getBalance(buffer_two_account), 0); + EXPECT_EQ(buffer_two.length(), 0); + + buffer_one_account->clearDownstream(); + buffer_two_account->clearDownstream(); +} + +TEST_F(BufferMemoryAccountTest, SliceRemainsAttachToOriginalAccountWhenMoved) { + auto buffer_one_account = factory_.createAccount(mock_reset_handler_); + Buffer::OwnedImpl buffer_one(buffer_one_account); + ASSERT_EQ(getBalance(buffer_one_account), 0); + + auto buffer_two_account = factory_.createAccount(mock_reset_handler_); + Buffer::OwnedImpl buffer_two(buffer_two_account); + ASSERT_EQ(getBalance(buffer_two_account), 0); + + buffer_one.add("Charged to Account One"); + EXPECT_EQ(getBalance(buffer_one_account), 4096); + EXPECT_EQ(getBalance(buffer_two_account), 0); + + // Transfer over buffer, still tied to account one. + buffer_two.move(buffer_one); + EXPECT_EQ(getBalance(buffer_one_account), 4096); + EXPECT_EQ(getBalance(buffer_two_account), 0); + + buffer_two.drain(buffer_two.length()); + EXPECT_EQ(getBalance(buffer_one_account), 0); + EXPECT_EQ(getBalance(buffer_two_account), 0); + + buffer_one_account->clearDownstream(); + buffer_two_account->clearDownstream(); +} + +TEST_F(BufferMemoryAccountTest, + SliceRemainsAttachToOriginalAccountWhenMovedUnlessCoalescedIntoExistingSlice) { + auto buffer_one_account = factory_.createAccount(mock_reset_handler_); + Buffer::OwnedImpl buffer_one(buffer_one_account); + ASSERT_EQ(getBalance(buffer_one_account), 0); + + auto buffer_two_account = factory_.createAccount(mock_reset_handler_); + Buffer::OwnedImpl buffer_two(buffer_two_account); + ASSERT_EQ(getBalance(buffer_two_account), 0); + + buffer_one.add("Will Coalesce"); + buffer_two.add("To be Coalesce into:"); + EXPECT_EQ(getBalance(buffer_one_account), 4096); + EXPECT_EQ(getBalance(buffer_two_account), 4096); + + // Transfer over buffer, slices coalesce, crediting account one. 
+ buffer_two.move(buffer_one); + EXPECT_EQ(getBalance(buffer_one_account), 0); + EXPECT_EQ(getBalance(buffer_two_account), 4096); + + buffer_two.drain(std::string("To be Coalesce into:Will Coalesce").length()); + EXPECT_EQ(getBalance(buffer_one_account), 0); + EXPECT_EQ(getBalance(buffer_two_account), 0); + + buffer_one_account->clearDownstream(); + buffer_two_account->clearDownstream(); +} + +TEST_F(BufferMemoryAccountTest, SliceCanRemainAttachedToOriginalAccountWhenMovedAndCoalescedInto) { + auto buffer_one_account = factory_.createAccount(mock_reset_handler_); + Buffer::OwnedImpl buffer_one(buffer_one_account); + ASSERT_EQ(getBalance(buffer_one_account), 0); + + auto buffer_two_account = factory_.createAccount(mock_reset_handler_); + Buffer::OwnedImpl buffer_two(buffer_two_account); + ASSERT_EQ(getBalance(buffer_two_account), 0); + + auto buffer_three_account = factory_.createAccount(mock_reset_handler_); + Buffer::OwnedImpl buffer_three(buffer_three_account); + ASSERT_EQ(getBalance(buffer_three_account), 0); + + buffer_one.add("Will Coalesce"); + buffer_two.add("To be Coalesce into:"); + EXPECT_EQ(getBalance(buffer_one_account), 4096); + EXPECT_EQ(getBalance(buffer_two_account), 4096); + + // Transfer buffers, leading to slice coalescing in third buffer. 
+ buffer_three.move(buffer_two); + buffer_three.move(buffer_one); + EXPECT_EQ(getBalance(buffer_one_account), 0); + EXPECT_EQ(getBalance(buffer_two_account), 4096); + EXPECT_EQ(getBalance(buffer_three_account), 0); + + buffer_three.drain(std::string("To be Coalesce into:Will Coalesce").length()); + EXPECT_EQ(getBalance(buffer_two_account), 0); + + buffer_one_account->clearDownstream(); + buffer_two_account->clearDownstream(); + buffer_three_account->clearDownstream(); +} + +TEST_F(BufferMemoryAccountTest, LinearizedBufferShouldChargeItsAssociatedAccount) { + auto buffer_one_account = factory_.createAccount(mock_reset_handler_); + Buffer::OwnedImpl buffer_one(buffer_one_account); + ASSERT_EQ(getBalance(buffer_one_account), 0); + + auto buffer_two_account = factory_.createAccount(mock_reset_handler_); + Buffer::OwnedImpl buffer_two(buffer_two_account); + ASSERT_EQ(getBalance(buffer_two_account), 0); + + auto buffer_three_account = factory_.createAccount(mock_reset_handler_); + Buffer::OwnedImpl buffer_three(buffer_three_account); + ASSERT_EQ(getBalance(buffer_three_account), 0); + + const std::string long_string(4096, 'a'); + buffer_one.add(long_string); + buffer_two.add(long_string); + EXPECT_EQ(getBalance(buffer_one_account), 4096); + EXPECT_EQ(getBalance(buffer_two_account), 4096); + + // Move into the third buffer. + buffer_three.move(buffer_one); + buffer_three.move(buffer_two); + EXPECT_EQ(getBalance(buffer_one_account), 4096); + EXPECT_EQ(getBalance(buffer_two_account), 4096); + EXPECT_EQ(getBalance(buffer_three_account), 0); + + // Linearize, which does a copy out of the slices. 
+ buffer_three.linearize(8192); + EXPECT_EQ(getBalance(buffer_one_account), 0); + EXPECT_EQ(getBalance(buffer_two_account), 0); + EXPECT_EQ(getBalance(buffer_three_account), 8192); + + buffer_one_account->clearDownstream(); + buffer_two_account->clearDownstream(); + buffer_three_account->clearDownstream(); +} + +TEST_F(BufferMemoryAccountTest, ManagesAccountBalanceWhenPrepending) { + auto prepend_to_account = factory_.createAccount(mock_reset_handler_); + Buffer::OwnedImpl buffer_to_prepend_to(prepend_to_account); + ASSERT_EQ(getBalance(prepend_to_account), 0); + + auto prepend_account = factory_.createAccount(mock_reset_handler_); + Buffer::OwnedImpl buffer_to_prepend(prepend_account); + ASSERT_EQ(getBalance(prepend_account), 0); + + Buffer::OwnedImpl unowned_buffer_to_prepend; + + unowned_buffer_to_prepend.add("World"); + buffer_to_prepend.add("Goodbye World"); + EXPECT_EQ(getBalance(prepend_account), 4096); + + // Prepend the buffers. + buffer_to_prepend_to.prepend(buffer_to_prepend); + EXPECT_EQ(getBalance(prepend_account), 4096); + EXPECT_EQ(getBalance(prepend_to_account), 0); + + buffer_to_prepend_to.prepend(unowned_buffer_to_prepend); + EXPECT_EQ(getBalance(prepend_to_account), 4096); + + // Prepend a string view. + buffer_to_prepend_to.prepend("Hello "); + EXPECT_EQ(getBalance(prepend_to_account), 8192); + + prepend_account->clearDownstream(); + prepend_to_account->clearDownstream(); +} + +TEST_F(BufferMemoryAccountTest, ExtractingSliceWithExistingStorageCreditsAccountOnce) { + auto buffer_account = factory_.createAccount(mock_reset_handler_); + Buffer::OwnedImpl buffer(buffer_account); + ASSERT_EQ(getBalance(buffer_account), 0); + + buffer.appendSliceForTest("Slice 1"); + buffer.appendSliceForTest("Slice 2"); + EXPECT_EQ(getBalance(buffer_account), 8192); + + // Account should only be credited when slice is extracted. + // Not on slice dtor. 
+ { + auto slice = buffer.extractMutableFrontSlice(); + EXPECT_EQ(getBalance(buffer_account), 4096); + } + + EXPECT_EQ(getBalance(buffer_account), 4096); + + buffer_account->clearDownstream(); +} + +TEST_F(BufferMemoryAccountTest, NewReservationSlicesOnlyChargedAfterCommit) { + auto buffer_account = factory_.createAccount(mock_reset_handler_); + Buffer::OwnedImpl buffer(buffer_account); + ASSERT_EQ(getBalance(buffer_account), 0); + + auto reservation = buffer.reserveForRead(); + EXPECT_EQ(getBalance(buffer_account), 0); + + // We should only be charged for the slices committed. + reservation.commit(16384); + EXPECT_EQ(getBalance(buffer_account), 16384); + + buffer_account->clearDownstream(); +} + +TEST_F(BufferMemoryAccountTest, ReservationShouldNotChargeForExistingSlice) { + auto buffer_account = factory_.createAccount(mock_reset_handler_); + + Buffer::OwnedImpl buffer(buffer_account); + ASSERT_EQ(getBalance(buffer_account), 0); + + buffer.add("Many bytes remaining in this slice to use for reservation."); + EXPECT_EQ(getBalance(buffer_account), 4096); + + // The account shouldn't be charged again at commit since the commit + // uses memory from the slice already charged for. 
+ auto reservation = buffer.reserveForRead(); + reservation.commit(2000); + EXPECT_EQ(getBalance(buffer_account), 4096); + + buffer_account->clearDownstream(); +} + +TEST_F(BufferMemoryAccountTest, AccountShouldNotBeTrackedByFactoryUnlessAboveMinimumBalance) { + auto account = factory_.createAccount(mock_reset_handler_); + + // Check not tracked + factory_.inspectMemoryClasses(noAccountsTracked); + + // Still below minimum + account->charge(2020); + factory_.inspectMemoryClasses(noAccountsTracked); + + account->charge(kMinimumBalanceToTrack); + + // Check now tracked + factory_.inspectMemoryClasses([](MemoryClassesToAccountsSet& memory_classes_to_account) { + EXPECT_EQ(memory_classes_to_account[0].size(), 1); + }); + + account->credit(getBalance(account)); + account->clearDownstream(); +} + +TEST_F(BufferMemoryAccountTest, ClearingDownstreamShouldUnregisterTrackedAccounts) { + auto account = factory_.createAccount(mock_reset_handler_); + account->charge(kMinimumBalanceToTrack); + + // Check tracked + factory_.inspectMemoryClasses([](MemoryClassesToAccountsSet& memory_classes_to_account) { + EXPECT_EQ(memory_classes_to_account[0].size(), 1); + }); + + account->clearDownstream(); + + // Check no longer tracked + factory_.inspectMemoryClasses(noAccountsTracked); + + account->credit(getBalance(account)); +} + +TEST_F(BufferMemoryAccountTest, AccountCanResetStream) { + auto account = factory_.createAccount(mock_reset_handler_); + + EXPECT_CALL(mock_reset_handler_, resetStream(_)); + account->resetDownstream(); + account->clearDownstream(); +} + +TEST_F(BufferMemoryAccountTest, FactoryTracksAccountCorrectlyAsBalanceIncreases) { + auto account = factory_.createAccount(mock_reset_handler_); + account->charge(kMinimumBalanceToTrack); + + factory_.inspectMemoryClasses([](MemoryClassesToAccountsSet& memory_classes_to_account) { + EXPECT_EQ(memory_classes_to_account[0].size(), 1); + }); + + for (size_t i = 0; i < BufferMemoryAccountImpl::NUM_MEMORY_CLASSES_ - 1; ++i) { + // 
Double the balance to enter the higher buckets. + account->charge(getBalance(account)); + factory_.inspectMemoryClasses([i](MemoryClassesToAccountsSet& memory_classes_to_account) { + EXPECT_EQ(memory_classes_to_account[i].size(), 0); + EXPECT_EQ(memory_classes_to_account[i + 1].size(), 1); + }); + } + + account->credit(getBalance(account)); + account->clearDownstream(); +} + +TEST_F(BufferMemoryAccountTest, FactoryTracksAccountCorrectlyAsBalanceDecreases) { + auto account = factory_.createAccount(mock_reset_handler_); + account->charge(kThresholdForFinalBucket); + + factory_.inspectMemoryClasses([](MemoryClassesToAccountsSet& memory_classes_to_account) { + EXPECT_EQ(memory_classes_to_account[BufferMemoryAccountImpl::NUM_MEMORY_CLASSES_ - 1].size(), + 1); + }); + + for (int i = BufferMemoryAccountImpl::NUM_MEMORY_CLASSES_ - 2; i > 0; --i) { + // Halve the balance to enter the lower buckets. + account->credit(getBalance(account) / 2); + factory_.inspectMemoryClasses([i](MemoryClassesToAccountsSet& memory_classes_to_account) { + EXPECT_EQ(memory_classes_to_account[i + 1].size(), 0); + EXPECT_EQ(memory_classes_to_account[i].size(), 1); + }); + } + + account->credit(getBalance(account)); + account->clearDownstream(); +} + +TEST_F(BufferMemoryAccountTest, SizeSaturatesInLargestBucket) { + auto account = factory_.createAccount(mock_reset_handler_); + account->charge(kThresholdForFinalBucket); + + factory_.inspectMemoryClasses([](MemoryClassesToAccountsSet& memory_classes_to_account) { + EXPECT_EQ(memory_classes_to_account[BufferMemoryAccountImpl::NUM_MEMORY_CLASSES_ - 1].size(), + 1); + }); + + account->charge(getBalance(account)); + + // Remains in final bucket. 
+ factory_.inspectMemoryClasses([](MemoryClassesToAccountsSet& memory_classes_to_account) { + EXPECT_EQ(memory_classes_to_account[BufferMemoryAccountImpl::NUM_MEMORY_CLASSES_ - 1].size(), + 1); + }); + + account->credit(getBalance(account)); + account->clearDownstream(); +} + +TEST_F(BufferMemoryAccountTest, RemainsInSameBucketIfChangesWithinThreshold) { + auto account = factory_.createAccount(mock_reset_handler_); + account->charge(kMinimumBalanceToTrack); + + factory_.inspectMemoryClasses([](MemoryClassesToAccountsSet& memory_classes_to_account) { + EXPECT_EQ(memory_classes_to_account[0].size(), 1); + }); + + // Charge to see in same bucket. + account->charge(kMinimumBalanceToTrack - 1); + + factory_.inspectMemoryClasses([](MemoryClassesToAccountsSet& memory_classes_to_account) { + EXPECT_EQ(memory_classes_to_account[0].size(), 1); + }); + + // Credit to see in same bucket. + account->credit(kMinimumBalanceToTrack - 1); + + factory_.inspectMemoryClasses([](MemoryClassesToAccountsSet& memory_classes_to_account) { + EXPECT_EQ(memory_classes_to_account[0].size(), 1); + }); + + account->credit(getBalance(account)); + account->clearDownstream(); +} + +TEST(WatermarkBufferFactoryTest, CanConfigureMinimumTrackingAmount) { + auto config = envoy::config::overload::v3::BufferFactoryConfig(); + config.set_minimum_account_to_track_power_of_two(3); + WatermarkBufferFactory factory(config); + EXPECT_EQ(factory.bitshift(), 2); +} + +TEST(WatermarkBufferFactoryTest, DefaultsToEffectivelyNotTracking) { + auto config = envoy::config::overload::v3::BufferFactoryConfig(); + WatermarkBufferFactory factory(config); + EXPECT_EQ(factory.bitshift(), 63); // Too large for any reasonable account size. 
+} + +TEST(WatermarkBufferFactoryTest, ShouldOnlyResetAllStreamsGreatThanOrEqualToProvidedIndex) { + TrackedWatermarkBufferFactory factory(absl::bit_width(kMinimumBalanceToTrack)); + Http::MockStreamResetHandler largest_stream_to_reset; + Http::MockStreamResetHandler stream_to_reset; + Http::MockStreamResetHandler stream_that_should_not_be_reset; + + auto largest_account_to_reset = factory.createAccount(largest_stream_to_reset); + auto account_to_reset = factory.createAccount(stream_to_reset); + auto account_to_not_reset = factory.createAccount(stream_that_should_not_be_reset); + + largest_account_to_reset->charge(kThresholdForFinalBucket); + account_to_reset->charge(2 * kMinimumBalanceToTrack); + account_to_not_reset->charge(kMinimumBalanceToTrack); + + // Check that all of the accounts are tracked + factory.inspectMemoryClasses([](MemoryClassesToAccountsSet& memory_classes_to_account) { + EXPECT_EQ(memory_classes_to_account[0].size(), 1); + EXPECT_EQ(memory_classes_to_account[1].size(), 1); + EXPECT_EQ(memory_classes_to_account[7].size(), 1); + }); + + EXPECT_CALL(largest_stream_to_reset, resetStream(_)).WillOnce(Invoke([&]() { + largest_account_to_reset->credit(getBalance(largest_account_to_reset)); + largest_account_to_reset->clearDownstream(); + })); + + EXPECT_CALL(stream_to_reset, resetStream(_)).WillOnce(Invoke([&]() { + account_to_reset->credit(getBalance(account_to_reset)); + account_to_reset->clearDownstream(); + })); + + EXPECT_CALL(stream_that_should_not_be_reset, resetStream(_)).Times(0); + // Should call resetStream on all streams in bucket >= 1. 
+ EXPECT_EQ(factory.resetAccountsGivenPressure(0.85), 2); + + account_to_not_reset->credit(kMinimumBalanceToTrack); + account_to_not_reset->clearDownstream(); +} + +TEST(WatermarkBufferFactoryTest, ComputesBucketToResetCorrectly) { + TrackedWatermarkBufferFactory factory(absl::bit_width(kMinimumBalanceToTrack)); + + // Create vector of accounts and handlers + std::vector> reset_handlers; + std::vector accounts; + uint32_t seed_account_balance = kMinimumBalanceToTrack; + + for (uint32_t i = 0; i < BufferMemoryAccountImpl::NUM_MEMORY_CLASSES_; ++i) { + reset_handlers.emplace_back(std::make_unique()); + accounts.emplace_back(factory.createAccount(*(reset_handlers.back()))); + accounts.back()->charge(seed_account_balance); + seed_account_balance *= 2; + } + + // Check that all memory classes have a corresponding account + factory.inspectMemoryClasses([](MemoryClassesToAccountsSet& memory_classes_to_account) { + for (auto& account_set : memory_classes_to_account) { + EXPECT_EQ(account_set.size(), 1); + } + }); + + // Reset accounts checking correct threshold + float pressure = 0.0; + const float pressure_gradation = 1.0 / BufferMemoryAccountImpl::NUM_MEMORY_CLASSES_; + for (uint32_t i = 0; i < BufferMemoryAccountImpl::NUM_MEMORY_CLASSES_; ++i) { + EXPECT_CALL(*reset_handlers.back(), resetStream(_)).WillOnce(Invoke([&]() { + auto current_account = accounts.back(); + current_account->credit(getBalance(current_account)); + current_account->clearDownstream(); + })); + + EXPECT_EQ(factory.resetAccountsGivenPressure(pressure), 1); + + // Move onto next reset handler and account + accounts.pop_back(); + reset_handlers.pop_back(); + + pressure += pressure_gradation; + } +} + +} // namespace +} // namespace Buffer +} // namespace Envoy diff --git a/test/common/buffer/owned_impl_test.cc b/test/common/buffer/owned_impl_test.cc index 55a877a01370e..854be0f4dd5e1 100644 --- a/test/common/buffer/owned_impl_test.cc +++ b/test/common/buffer/owned_impl_test.cc @@ -1265,277 +1265,6 @@ 
TEST_F(OwnedImplTest, FrontSlice) { EXPECT_EQ(1, buffer.frontSlice().len_); } -TEST(BufferMemoryAccountTest, ManagesAccountBalance) { - auto account = std::make_shared(); - Buffer::OwnedImpl buffer(account); - ASSERT_EQ(account->balance(), 0); - - // Check the balance increases as expected. - { - // New slice created - buffer.add("Hello"); - EXPECT_EQ(account->balance(), 4096); - - // Should just be added to existing slice. - buffer.add(" World!"); - EXPECT_EQ(account->balance(), 4096); - - // Trigger new slice creation with add. - const std::string long_string(4096, 'a'); - buffer.add(long_string); - EXPECT_EQ(account->balance(), 8192); - - // AppendForTest also adds new slice. - buffer.appendSliceForTest("Extra Slice"); - EXPECT_EQ(account->balance(), 12288); - } - - // Check the balance drains as slices are consumed. - { - // Shouldn't trigger slice free yet - buffer.drain(4095); - EXPECT_EQ(account->balance(), 12288); - - // Trigger slice reclaim. - buffer.drain(1); - EXPECT_EQ(account->balance(), 8192); - - // Reclaim next slice - buffer.drain(std::string("Hello World!").length()); - EXPECT_EQ(account->balance(), 4096); - - // Reclaim remaining - buffer.drain(std::string("Extra Slice").length()); - EXPECT_EQ(account->balance(), 0); - } -} - -TEST(BufferMemoryAccountTest, BufferAccountsForUnownedSliceMovedInto) { - auto account = std::make_shared(); - Buffer::OwnedImpl accounted_buffer(account); - - Buffer::OwnedImpl unowned_buffer; - unowned_buffer.add("Unaccounted Slice"); - ASSERT_EQ(account->balance(), 0); - - // Transfer over buffer - accounted_buffer.move(unowned_buffer); - EXPECT_EQ(account->balance(), 4096); - - accounted_buffer.drain(accounted_buffer.length()); - EXPECT_EQ(account->balance(), 0); -} - -TEST(BufferMemoryAccountTest, BufferFragmentsShouldNotHaveAnAssociatedAccount) { - auto buffer_one_account = std::make_shared(); - Buffer::OwnedImpl buffer_one(buffer_one_account); - ASSERT_EQ(buffer_one_account->balance(), 0); - - auto 
buffer_two_account = std::make_shared(); - Buffer::OwnedImpl buffer_two(buffer_two_account); - ASSERT_EQ(buffer_two_account->balance(), 0); - - const char data[] = "hello world"; - BufferFragmentImpl frag(data, 11, nullptr); - buffer_one.addBufferFragment(frag); - EXPECT_EQ(buffer_one_account->balance(), 0); - EXPECT_EQ(buffer_one.length(), 11); - - // Transfer over buffer - buffer_two.move(buffer_one); - EXPECT_EQ(buffer_two_account->balance(), 0); - EXPECT_EQ(buffer_two.length(), 11); - - buffer_two.drain(buffer_two.length()); - EXPECT_EQ(buffer_two_account->balance(), 0); - EXPECT_EQ(buffer_two.length(), 0); -} - -TEST(BufferMemoryAccountTest, SliceRemainsAttachToOriginalAccountWhenMoved) { - auto buffer_one_account = std::make_shared(); - Buffer::OwnedImpl buffer_one(buffer_one_account); - ASSERT_EQ(buffer_one_account->balance(), 0); - - auto buffer_two_account = std::make_shared(); - Buffer::OwnedImpl buffer_two(buffer_two_account); - ASSERT_EQ(buffer_two_account->balance(), 0); - - buffer_one.add("Charged to Account One"); - EXPECT_EQ(buffer_one_account->balance(), 4096); - EXPECT_EQ(buffer_two_account->balance(), 0); - - // Transfer over buffer, still tied to account one. 
- buffer_two.move(buffer_one); - EXPECT_EQ(buffer_one_account->balance(), 4096); - EXPECT_EQ(buffer_two_account->balance(), 0); - - buffer_two.drain(buffer_two.length()); - EXPECT_EQ(buffer_one_account->balance(), 0); - EXPECT_EQ(buffer_two_account->balance(), 0); -} - -TEST(BufferMemoryAccountTest, - SliceRemainsAttachToOriginalAccountWhenMovedUnlessCoalescedIntoExistingSlice) { - auto buffer_one_account = std::make_shared(); - Buffer::OwnedImpl buffer_one(buffer_one_account); - ASSERT_EQ(buffer_one_account->balance(), 0); - - auto buffer_two_account = std::make_shared(); - Buffer::OwnedImpl buffer_two(buffer_two_account); - ASSERT_EQ(buffer_two_account->balance(), 0); - - buffer_one.add("Will Coalesce"); - buffer_two.add("To be Coalesce into:"); - EXPECT_EQ(buffer_one_account->balance(), 4096); - EXPECT_EQ(buffer_two_account->balance(), 4096); - - // Transfer over buffer, slices coalesce, crediting account one. - buffer_two.move(buffer_one); - EXPECT_EQ(buffer_one_account->balance(), 0); - EXPECT_EQ(buffer_two_account->balance(), 4096); - - buffer_two.drain(std::string("To be Coalesce into:Will Coalesce").length()); - EXPECT_EQ(buffer_one_account->balance(), 0); - EXPECT_EQ(buffer_two_account->balance(), 0); -} - -TEST(BufferMemoryAccountTest, SliceCanRemainAttachedToOriginalAccountWhenMovedAndCoalescedInto) { - auto buffer_one_account = std::make_shared(); - Buffer::OwnedImpl buffer_one(buffer_one_account); - ASSERT_EQ(buffer_one_account->balance(), 0); - - auto buffer_two_account = std::make_shared(); - Buffer::OwnedImpl buffer_two(buffer_two_account); - ASSERT_EQ(buffer_two_account->balance(), 0); - - auto buffer_three_account = std::make_shared(); - Buffer::OwnedImpl buffer_three(buffer_three_account); - ASSERT_EQ(buffer_three_account->balance(), 0); - - buffer_one.add("Will Coalesce"); - buffer_two.add("To be Coalesce into:"); - EXPECT_EQ(buffer_one_account->balance(), 4096); - EXPECT_EQ(buffer_two_account->balance(), 4096); - - // Transfer buffers, leading 
to slice coalescing in third buffer. - buffer_three.move(buffer_two); - buffer_three.move(buffer_one); - EXPECT_EQ(buffer_one_account->balance(), 0); - EXPECT_EQ(buffer_two_account->balance(), 4096); - EXPECT_EQ(buffer_three_account->balance(), 0); - - buffer_three.drain(std::string("To be Coalesce into:Will Coalesce").length()); - EXPECT_EQ(buffer_two_account->balance(), 0); -} - -TEST(BufferMemoryAccountTest, LinearizedBufferShouldChargeItsAssociatedAccount) { - auto buffer_one_account = std::make_shared(); - Buffer::OwnedImpl buffer_one(buffer_one_account); - ASSERT_EQ(buffer_one_account->balance(), 0); - - auto buffer_two_account = std::make_shared(); - Buffer::OwnedImpl buffer_two(buffer_two_account); - ASSERT_EQ(buffer_two_account->balance(), 0); - - auto buffer_three_account = std::make_shared(); - Buffer::OwnedImpl buffer_three(buffer_three_account); - ASSERT_EQ(buffer_three_account->balance(), 0); - - const std::string long_string(4096, 'a'); - buffer_one.add(long_string); - buffer_two.add(long_string); - EXPECT_EQ(buffer_one_account->balance(), 4096); - EXPECT_EQ(buffer_two_account->balance(), 4096); - - // Move into the third buffer. - buffer_three.move(buffer_one); - buffer_three.move(buffer_two); - EXPECT_EQ(buffer_one_account->balance(), 4096); - EXPECT_EQ(buffer_two_account->balance(), 4096); - EXPECT_EQ(buffer_three_account->balance(), 0); - - // Linearize, which does a copy out of the slices. 
- buffer_three.linearize(8192); - EXPECT_EQ(buffer_one_account->balance(), 0); - EXPECT_EQ(buffer_two_account->balance(), 0); - EXPECT_EQ(buffer_three_account->balance(), 8192); -} - -TEST(BufferMemoryAccountTest, ManagesAccountBalanceWhenPrepending) { - auto prepend_to_account = std::make_shared(); - Buffer::OwnedImpl buffer_to_prepend_to(prepend_to_account); - ASSERT_EQ(prepend_to_account->balance(), 0); - - auto prepend_account = std::make_shared(); - Buffer::OwnedImpl buffer_to_prepend(prepend_account); - ASSERT_EQ(prepend_account->balance(), 0); - - Buffer::OwnedImpl unowned_buffer_to_prepend; - - unowned_buffer_to_prepend.add("World"); - buffer_to_prepend.add("Goodbye World"); - EXPECT_EQ(prepend_account->balance(), 4096); - - // Prepend the buffers. - buffer_to_prepend_to.prepend(buffer_to_prepend); - EXPECT_EQ(prepend_account->balance(), 4096); - EXPECT_EQ(prepend_to_account->balance(), 0); - - buffer_to_prepend_to.prepend(unowned_buffer_to_prepend); - EXPECT_EQ(prepend_to_account->balance(), 4096); - - // Prepend a string view. - buffer_to_prepend_to.prepend("Hello "); - EXPECT_EQ(prepend_to_account->balance(), 8192); -} - -TEST(BufferMemoryAccountTest, ExtractingSliceWithExistingStorageCreditsAccountOnce) { - auto buffer_account = std::make_shared(); - Buffer::OwnedImpl buffer(buffer_account); - ASSERT_EQ(buffer_account->balance(), 0); - - buffer.appendSliceForTest("Slice 1"); - buffer.appendSliceForTest("Slice 2"); - EXPECT_EQ(buffer_account->balance(), 8192); - - // Account should only be credited when slice is extracted. - // Not on slice dtor. 
- { - auto slice = buffer.extractMutableFrontSlice(); - EXPECT_EQ(buffer_account->balance(), 4096); - } - - EXPECT_EQ(buffer_account->balance(), 4096); -} - -TEST(BufferMemoryAccountTest, NewReservationSlicesOnlyChargedAfterCommit) { - auto buffer_account = std::make_shared(); - Buffer::OwnedImpl buffer(buffer_account); - ASSERT_EQ(buffer_account->balance(), 0); - - auto reservation = buffer.reserveForRead(); - EXPECT_EQ(buffer_account->balance(), 0); - - // We should only be charged for the slices committed. - reservation.commit(16384); - EXPECT_EQ(buffer_account->balance(), 16384); -} - -TEST(BufferMemoryAccountTest, ReservationShouldNotChargeForExistingSlice) { - auto buffer_account = std::make_shared(); - Buffer::OwnedImpl buffer(buffer_account); - ASSERT_EQ(buffer_account->balance(), 0); - - buffer.add("Many bytes remaining in this slice to use for reservation."); - EXPECT_EQ(buffer_account->balance(), 4096); - - // The account shouldn't be charged again at commit since the commit - // uses memory from the slice already charged for. 
- auto reservation = buffer.reserveForRead(); - reservation.commit(2000); - EXPECT_EQ(buffer_account->balance(), 4096); -} - } // namespace } // namespace Buffer } // namespace Envoy diff --git a/test/common/common/logger_test.cc b/test/common/common/logger_test.cc index 62fe85f33e811..ba0578abaecf4 100644 --- a/test/common/common/logger_test.cc +++ b/test/common/common/logger_test.cc @@ -9,6 +9,9 @@ #include "gmock/gmock.h" #include "gtest/gtest.h" +using testing::_; +using testing::Invoke; + namespace Envoy { namespace Logger { @@ -151,5 +154,36 @@ TEST_F(LoggerCustomFlagsTest, LogMessageAsJsonStringEscaped) { "\\\"transport: Error while dialing dial tcp [::1]:15012: connect: connection refused\\\""); } +struct NamedLogSink : SinkDelegate { + NamedLogSink(DelegatingLogSinkSharedPtr log_sink) : SinkDelegate(log_sink) { setDelegate(); } + ~NamedLogSink() override { restoreDelegate(); } + + MOCK_METHOD(void, log, (absl::string_view)); + MOCK_METHOD(void, logWithStableName, + (absl::string_view, absl::string_view, absl::string_view, absl::string_view)); + void flush() override {} +}; + +class NamedLogTest : public Loggable, public testing::Test {}; + +TEST_F(NamedLogTest, NamedLogsAreSentToSink) { + NamedLogSink sink(Envoy::Logger::Registry::getSink()); + + Envoy::Logger::Registry::setLogLevel(spdlog::level::info); + // Log level is above debug, so we shouldn't get any logs. + ENVOY_LOG_EVENT(debug, "test_event", "not logged"); + + Envoy::Logger::Registry::setLogLevel(spdlog::level::debug); + + EXPECT_CALL(sink, log(_)); + EXPECT_CALL(sink, logWithStableName("test_event", "debug", "assert", "test log 1")); + ENVOY_LOG_EVENT(debug, "test_event", "test {} {}", "log", 1); + + // Verify that ENVOY_LOG_EVENT_TO_LOGGER does the right thing. 
+ EXPECT_CALL(sink, log(_)).WillOnce(Invoke([](auto log) { EXPECT_TRUE(log.find("[misc]")); })); + EXPECT_CALL(sink, logWithStableName("misc_event", "debug", "misc", "log")); + ENVOY_LOG_EVENT_TO_LOGGER(Registry::getLog(Id::misc), debug, "misc_event", "log"); +} + } // namespace Logger } // namespace Envoy diff --git a/test/common/common/matchers_test.cc b/test/common/common/matchers_test.cc index bcdf34441acc7..581967af8b3ea 100644 --- a/test/common/common/matchers_test.cc +++ b/test/common/common/matchers_test.cc @@ -280,6 +280,22 @@ TEST(MetadataTest, MatchDoubleListValue) { metadataValue.Clear(); } +TEST(MetadataTest, InvertMatch) { + envoy::config::core::v3::Metadata metadata; + Envoy::Config::Metadata::mutableMetadataValue(metadata, "envoy.filter.x", "label") + .set_string_value("prod"); + + envoy::type::matcher::v3::MetadataMatcher matcher; + matcher.set_filter("envoy.filter.x"); + matcher.add_path()->set_key("label"); + matcher.set_invert(true); + + matcher.mutable_value()->mutable_string_match()->set_exact("test"); + EXPECT_TRUE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata)); + matcher.mutable_value()->mutable_string_match()->set_exact("prod"); + EXPECT_FALSE(Envoy::Matchers::MetadataMatcher(matcher).match(metadata)); +} + TEST(StringMatcher, ExactMatchIgnoreCase) { envoy::type::matcher::v3::StringMatcher matcher; matcher.set_exact("exact"); diff --git a/test/common/config/BUILD b/test/common/config/BUILD index aa0e857d6a77b..98e4105927875 100644 --- a/test/common/config/BUILD +++ b/test/common/config/BUILD @@ -16,15 +16,6 @@ envoy_cc_test( deps = ["@envoy_api//envoy/config/cluster/v3:pkg_cc_proto"], ) -envoy_cc_test( - name = "api_type_oracle_test", - srcs = ["api_type_oracle_test.cc"], - deps = [ - "//source/common/config:api_type_oracle_lib", - "@envoy_api//envoy/extensions/filters/http/ip_tagging/v3:pkg_cc_proto", - ], -) - envoy_cc_test( name = "decoded_resource_impl_test", srcs = ["decoded_resource_impl_test.cc"], @@ -62,7 +53,6 @@ 
envoy_cc_test( "//test/mocks/local_info:local_info_mocks", "//test/mocks/runtime:runtime_mocks", "//test/test_common:logging_lib", - "@envoy_api//envoy/api/v2:pkg_cc_proto", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto", "@envoy_api//envoy/service/discovery/v3:pkg_cc_proto", @@ -150,7 +140,6 @@ envoy_cc_test( "//source/common/config:api_version_lib", "//source/common/config:grpc_mux_lib", "//source/common/config:protobuf_link_hacks", - "//source/common/config:version_converter_lib", "//source/common/protobuf", "//source/common/stats:isolated_store_lib", "//test/common/stats:stat_test_utility_lib", @@ -165,7 +154,6 @@ envoy_cc_test( "//test/test_common:simulated_time_system_lib", "//test/test_common:test_runtime_lib", "//test/test_common:utility_lib", - "@envoy_api//envoy/api/v2:pkg_cc_proto", "@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto", "@envoy_api//envoy/service/discovery/v3:pkg_cc_proto", ], @@ -177,9 +165,10 @@ envoy_cc_test( deps = [ "//source/common/config:new_grpc_mux_lib", "//source/common/config:protobuf_link_hacks", - "//source/common/config:version_converter_lib", + "//source/common/config/xds_mux:grpc_mux_lib", "//source/common/protobuf", "//test/common/stats:stat_test_utility_lib", + "//test/config:v2_link_hacks", "//test/mocks:common_lib", "//test/mocks/config:config_mocks", "//test/mocks/event:event_mocks", @@ -196,6 +185,33 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "xds_grpc_mux_impl_test", + srcs = ["xds_grpc_mux_impl_test.cc"], + deps = [ + "//source/common/config:protobuf_link_hacks", + "//source/common/config:resource_name_lib", + "//source/common/config/xds_mux:grpc_mux_lib", + "//source/common/protobuf", + "//test/common/stats:stat_test_utility_lib", + "//test/config:v2_link_hacks", + "//test/mocks:common_lib", + "//test/mocks/config:config_mocks", + "//test/mocks/event:event_mocks", + "//test/mocks/grpc:grpc_mocks", + "//test/mocks/local_info:local_info_mocks", + 
"//test/mocks/runtime:runtime_mocks", + "//test/test_common:logging_lib", + "//test/test_common:resources_lib", + "//test/test_common:simulated_time_system_lib", + "//test/test_common:test_runtime_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/api/v2:pkg_cc_proto", + "@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto", + "@envoy_api//envoy/service/discovery/v3:pkg_cc_proto", + ], +) + envoy_cc_test( name = "grpc_stream_test", srcs = ["grpc_stream_test.cc"], @@ -230,7 +246,6 @@ envoy_cc_test_library( "//source/common/config:api_version_lib", "//source/common/config:grpc_mux_lib", "//source/common/config:grpc_subscription_lib", - "//source/common/config:version_converter_lib", "//test/mocks/config:config_mocks", "//test/mocks/event:event_mocks", "//test/mocks/grpc:grpc_mocks", @@ -238,7 +253,6 @@ envoy_cc_test_library( "//test/mocks/upstream:cluster_manager_mocks", "//test/test_common:resources_lib", "//test/test_common:utility_lib", - "@envoy_api//envoy/api/v2:pkg_cc_proto", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto", "@envoy_api//envoy/service/discovery/v3:pkg_cc_proto", @@ -252,7 +266,6 @@ envoy_cc_test_library( ":subscription_test_harness", "//source/common/common:utility_lib", "//source/common/config:new_grpc_mux_lib", - "//source/common/config:version_converter_lib", "//source/common/grpc:common_lib", "//test/mocks/config:config_mocks", "//test/mocks/event:event_mocks", @@ -313,6 +326,7 @@ envoy_cc_test( deps = [ "//source/common/config:subscription_factory_lib", "//source/common/config:xds_resource_lib", + "//test/config:v2_link_hacks", "//test/mocks/config:config_mocks", "//test/mocks/event:event_mocks", "//test/mocks/filesystem:filesystem_mocks", @@ -357,6 +371,7 @@ envoy_cc_test( srcs = ["type_to_endpoint_test.cc"], deps = [ "//source/common/config:type_to_endpoint_lib", + "//test/config:v2_link_hacks", "@envoy_api//envoy/api/v2:pkg_cc_proto", 
"@envoy_api//envoy/service/route/v3:pkg_cc_proto", ], @@ -515,27 +530,6 @@ envoy_cc_test_library( hdrs = ["xds_test_utility.h"], ) -envoy_proto_library( - name = "version_converter_proto", - srcs = ["version_converter.proto"], -) - -envoy_cc_test( - name = "version_converter_test", - srcs = ["version_converter_test.cc"], - deps = [ - ":version_converter_proto_cc_proto", - "//source/common/config:api_version_lib", - "//source/common/config:version_converter_lib", - "//source/common/protobuf:well_known_lib", - "//test/test_common:test_time_lib", - "//test/test_common:utility_lib", - "@envoy_api//envoy/api/v2:pkg_cc_proto", - "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", - "@envoy_api//envoy/service/discovery/v3:pkg_cc_proto", - ], -) - envoy_cc_test( name = "watched_directory_test", srcs = ["watched_directory_test.cc"], diff --git a/test/common/config/api_type_oracle_test.cc b/test/common/config/api_type_oracle_test.cc deleted file mode 100644 index d4cda061cde5b..0000000000000 --- a/test/common/config/api_type_oracle_test.cc +++ /dev/null @@ -1,26 +0,0 @@ -#include "envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.pb.h" - -#include "source/common/config/api_type_oracle.h" - -#include "gtest/gtest.h" - -// API_NO_BOOST_FILE - -namespace Envoy { -namespace Config { -namespace { - -TEST(ApiTypeOracleTest, All) { - envoy::extensions::filters::http::ip_tagging::v3::IPTagging v3_config; - ProtobufWkt::Any non_api_type; - - EXPECT_EQ(nullptr, - ApiTypeOracle::getEarlierVersionDescriptor(non_api_type.GetDescriptor()->full_name())); - EXPECT_NE(envoy::extensions::filters::http::ip_tagging::v3::IPTagging::descriptor()->full_name(), - ApiTypeOracle::getEarlierVersionMessageTypeName(v3_config.GetDescriptor()->full_name()) - .value()); -} - -} // namespace -} // namespace Config -} // namespace Envoy diff --git a/test/common/config/delta_subscription_impl_test.cc b/test/common/config/delta_subscription_impl_test.cc index 92e11a0f7f214..13ab2522e325d 100644 --- 
a/test/common/config/delta_subscription_impl_test.cc +++ b/test/common/config/delta_subscription_impl_test.cc @@ -1,4 +1,3 @@ -#include "envoy/api/v2/discovery.pb.h" #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/endpoint/v3/endpoint.pb.h" #include "envoy/service/discovery/v3/discovery.pb.h" @@ -112,7 +111,7 @@ TEST_F(DeltaSubscriptionImplTest, PauseQueuesAcks) { // All ACK sendMessage()s will happen upon calling resume(). EXPECT_CALL(async_stream_, sendMessageRaw_(_, _)) .WillRepeatedly(Invoke([this](Buffer::InstancePtr& buffer, bool) { - API_NO_BOOST(envoy::api::v2::DeltaDiscoveryRequest) message; + API_NO_BOOST(envoy::service::discovery::v3::DeltaDiscoveryRequest) message; EXPECT_TRUE(Grpc::Common::parseBufferInstance(std::move(buffer), message)); const std::string nonce = message.response_nonce(); if (!nonce.empty()) { @@ -141,11 +140,10 @@ TEST(DeltaSubscriptionImplFixturelessTest, NoGrpcStream) { const Protobuf::MethodDescriptor* method_descriptor = Protobuf::DescriptorPool::generated_pool()->FindMethodByName( - "envoy.api.v2.EndpointDiscoveryService.StreamEndpoints"); + "envoy.service.endpoint.v3.EndpointDiscoveryService.StreamEndpoints"); NewGrpcMuxImplSharedPtr xds_context = std::make_shared( - std::unique_ptr(async_client), dispatcher, *method_descriptor, - envoy::config::core::v3::ApiVersion::AUTO, random, stats_store, rate_limit_settings, - local_info); + std::unique_ptr(async_client), dispatcher, *method_descriptor, random, + stats_store, rate_limit_settings, local_info); GrpcSubscriptionImplPtr subscription = std::make_unique( xds_context, callbacks, resource_decoder, stats, Config::TypeUrl::get().ClusterLoadAssignment, diff --git a/test/common/config/delta_subscription_state_test.cc b/test/common/config/delta_subscription_state_test.cc index 0aedc138039b1..5c816a194b723 100644 --- a/test/common/config/delta_subscription_state_test.cc +++ b/test/common/config/delta_subscription_state_test.cc @@ -27,7 +27,7 @@ namespace Envoy { 
namespace Config { namespace { -const char TypeUrl[] = "type.googleapis.com/envoy.api.v2.Cluster"; +const char TypeUrl[] = "type.googleapis.com/envoy.config.cluster.v3.Cluster"; enum class LegacyOrUnified { Legacy, Unified }; class DeltaSubscriptionStateTestBase : public testing::TestWithParam { diff --git a/test/common/config/delta_subscription_test_harness.h b/test/common/config/delta_subscription_test_harness.h index e74e33f40230e..f184a721ea35e 100644 --- a/test/common/config/delta_subscription_test_harness.h +++ b/test/common/config/delta_subscription_test_harness.h @@ -9,7 +9,6 @@ #include "source/common/config/grpc_subscription_impl.h" #include "source/common/config/new_grpc_mux_impl.h" -#include "source/common/config/version_converter.h" #include "source/common/grpc/common.h" #include "test/common/config/subscription_test_harness.h" @@ -36,15 +35,14 @@ class DeltaSubscriptionTestHarness : public SubscriptionTestHarness { DeltaSubscriptionTestHarness() : DeltaSubscriptionTestHarness(std::chrono::milliseconds(0)) {} DeltaSubscriptionTestHarness(std::chrono::milliseconds init_fetch_timeout) : method_descriptor_(Protobuf::DescriptorPool::generated_pool()->FindMethodByName( - "envoy.api.v2.EndpointDiscoveryService.StreamEndpoints")), + "envoy.service.endpoint.v3.EndpointDiscoveryService.StreamEndpoints")), async_client_(new Grpc::MockAsyncClient()) { node_.set_id("fo0"); EXPECT_CALL(local_info_, node()).WillRepeatedly(testing::ReturnRef(node_)); EXPECT_CALL(dispatcher_, createTimer_(_)).Times(2); xds_context_ = std::make_shared( std::unique_ptr(async_client_), dispatcher_, *method_descriptor_, - envoy::config::core::v3::ApiVersion::AUTO, random_, stats_store_, rate_limit_settings_, - local_info_); + random_, stats_store_, rate_limit_settings_, local_info_); subscription_ = std::make_unique( xds_context_, callbacks_, resource_decoder_, stats_, Config::TypeUrl::get().ClusterLoadAssignment, dispatcher_, init_fetch_timeout, false, @@ -92,8 +90,8 @@ class 
DeltaSubscriptionTestHarness : public SubscriptionTestHarness { const std::set& unsubscribe, const Protobuf::int32 error_code, const std::string& error_message, std::map initial_resource_versions) { - API_NO_BOOST(envoy::api::v2::DeltaDiscoveryRequest) expected_request; - expected_request.mutable_node()->CopyFrom(API_DOWNGRADE(node_)); + API_NO_BOOST(envoy::service::discovery::v3::DeltaDiscoveryRequest) expected_request; + expected_request.mutable_node()->CopyFrom(node_); std::copy( subscribe.begin(), subscribe.end(), Protobuf::RepeatedFieldBackInserter(expected_request.mutable_resource_names_subscribe())); @@ -119,7 +117,7 @@ class DeltaSubscriptionTestHarness : public SubscriptionTestHarness { sendMessageRaw_( Grpc::ProtoBufferEqIgnoringField(expected_request, "response_nonce"), false)) .WillOnce([this](Buffer::InstancePtr& buffer, bool) { - API_NO_BOOST(envoy::api::v2::DeltaDiscoveryRequest) message; + API_NO_BOOST(envoy::service::discovery::v3::DeltaDiscoveryRequest) message; EXPECT_TRUE(Grpc::Common::parseBufferInstance(std::move(buffer), message)); const std::string nonce = message.response_nonce(); if (!nonce.empty()) { diff --git a/test/common/config/grpc_mux_impl_test.cc b/test/common/config/grpc_mux_impl_test.cc index 3eecdbb4f5dec..8fbd8de8cee08 100644 --- a/test/common/config/grpc_mux_impl_test.cc +++ b/test/common/config/grpc_mux_impl_test.cc @@ -1,6 +1,5 @@ #include -#include "envoy/api/v2/discovery.pb.h" #include "envoy/config/endpoint/v3/endpoint.pb.h" #include "envoy/config/endpoint/v3/endpoint.pb.validate.h" #include "envoy/service/discovery/v3/discovery.pb.h" @@ -10,7 +9,6 @@ #include "source/common/config/grpc_mux_impl.h" #include "source/common/config/protobuf_link_hacks.h" #include "source/common/config/utility.h" -#include "source/common/config/version_converter.h" #include "source/common/protobuf/protobuf.h" #include "source/common/stats/isolated_store_impl.h" @@ -60,17 +58,16 @@ class GrpcMuxImplTestBase : public testing::Test { grpc_mux_ = 
std::make_unique( local_info_, std::unique_ptr(async_client_), dispatcher_, *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( - "envoy.service.discovery.v2.AggregatedDiscoveryService.StreamAggregatedResources"), - envoy::config::core::v3::ApiVersion::AUTO, random_, stats_, rate_limit_settings_, true); + "envoy.service.discovery.v3.AggregatedDiscoveryService.StreamAggregatedResources"), + random_, stats_, rate_limit_settings_, true); } void setup(const RateLimitSettings& custom_rate_limit_settings) { grpc_mux_ = std::make_unique( local_info_, std::unique_ptr(async_client_), dispatcher_, *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( - "envoy.service.discovery.v2.AggregatedDiscoveryService.StreamAggregatedResources"), - envoy::config::core::v3::ApiVersion::AUTO, random_, stats_, custom_rate_limit_settings, - true); + "envoy.service.discovery.v3.AggregatedDiscoveryService.StreamAggregatedResources"), + random_, stats_, custom_rate_limit_settings, true); } void expectSendMessage(const std::string& type_url, @@ -78,9 +75,9 @@ class GrpcMuxImplTestBase : public testing::Test { bool first = false, const std::string& nonce = "", const Protobuf::int32 error_code = Grpc::Status::WellKnownGrpcStatus::Ok, const std::string& error_message = "") { - API_NO_BOOST(envoy::api::v2::DiscoveryRequest) expected_request; + envoy::service::discovery::v3::DiscoveryRequest expected_request; if (first) { - expected_request.mutable_node()->CopyFrom(API_DOWNGRADE(local_info_.node())); + expected_request.mutable_node()->CopyFrom(local_info_.node()); } for (const auto& resource : resource_names) { expected_request.add_resource_names(resource); @@ -883,8 +880,8 @@ TEST_F(GrpcMuxImplTest, BadLocalInfoEmptyClusterName) { GrpcMuxImpl( local_info_, std::unique_ptr(async_client_), dispatcher_, *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( - "envoy.service.discovery.v2.AggregatedDiscoveryService.StreamAggregatedResources"), - 
envoy::config::core::v3::ApiVersion::AUTO, random_, stats_, rate_limit_settings_, true), + "envoy.service.discovery.v3.AggregatedDiscoveryService.StreamAggregatedResources"), + random_, stats_, rate_limit_settings_, true), EnvoyException, "ads: node 'id' and 'cluster' are required. Set it either in 'node' config or via " "--service-node and --service-cluster options."); @@ -896,8 +893,8 @@ TEST_F(GrpcMuxImplTest, BadLocalInfoEmptyNodeName) { GrpcMuxImpl( local_info_, std::unique_ptr(async_client_), dispatcher_, *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( - "envoy.service.discovery.v2.AggregatedDiscoveryService.StreamAggregatedResources"), - envoy::config::core::v3::ApiVersion::AUTO, random_, stats_, rate_limit_settings_, true), + "envoy.service.discovery.v3.AggregatedDiscoveryService.StreamAggregatedResources"), + random_, stats_, rate_limit_settings_, true), EnvoyException, "ads: node 'id' and 'cluster' are required. Set it either in 'node' config or via " "--service-node and --service-cluster options."); diff --git a/test/common/config/grpc_stream_test.cc b/test/common/config/grpc_stream_test.cc index 1ae7e7a7c27b8..760b24a305bbf 100644 --- a/test/common/config/grpc_stream_test.cc +++ b/test/common/config/grpc_stream_test.cc @@ -29,7 +29,7 @@ class GrpcStreamTest : public testing::Test { async_client_(async_client_owner_.get()), grpc_stream_(&callbacks_, std::move(async_client_owner_), *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( - "envoy.api.v2.EndpointDiscoveryService.StreamEndpoints"), + "envoy.service.endpoint.v3.EndpointDiscoveryService.StreamEndpoints"), random_, dispatcher_, stats_, rate_limit_settings_) {} NiceMock dispatcher_; diff --git a/test/common/config/grpc_subscription_test_harness.h b/test/common/config/grpc_subscription_test_harness.h index aa6aea0e5686c..5d49e74b2effb 100644 --- a/test/common/config/grpc_subscription_test_harness.h +++ b/test/common/config/grpc_subscription_test_harness.h @@ -2,7 +2,6 @@ 
#include -#include "envoy/api/v2/discovery.pb.h" #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/endpoint/v3/endpoint.pb.h" #include "envoy/config/endpoint/v3/endpoint.pb.validate.h" @@ -12,7 +11,6 @@ #include "source/common/config/api_version.h" #include "source/common/config/grpc_mux_impl.h" #include "source/common/config/grpc_subscription_impl.h" -#include "source/common/config/version_converter.h" #include "test/common/config/subscription_test_harness.h" #include "test/mocks/config/mocks.h" @@ -40,7 +38,7 @@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { GrpcSubscriptionTestHarness(std::chrono::milliseconds init_fetch_timeout) : method_descriptor_(Protobuf::DescriptorPool::generated_pool()->FindMethodByName( - "envoy.api.v2.EndpointDiscoveryService.StreamEndpoints")), + "envoy.service.endpoint.v3.EndpointDiscoveryService.StreamEndpoints")), async_client_(new NiceMock()) { node_.set_id("fo0"); EXPECT_CALL(local_info_, node()).WillRepeatedly(testing::ReturnRef(node_)); @@ -50,8 +48,7 @@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { mux_ = std::make_shared( local_info_, std::unique_ptr(async_client_), dispatcher_, - *method_descriptor_, envoy::config::core::v3::ApiVersion::AUTO, random_, stats_store_, - rate_limit_settings_, true); + *method_descriptor_, random_, stats_store_, rate_limit_settings_, true); subscription_ = std::make_unique( mux_, callbacks_, resource_decoder_, stats_, Config::TypeUrl::get().ClusterLoadAssignment, dispatcher_, init_fetch_timeout, false, SubscriptionOptions()); @@ -73,9 +70,9 @@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { bool expect_node, const Protobuf::int32 error_code, const std::string& error_message) { UNREFERENCED_PARAMETER(expect_node); - API_NO_BOOST(envoy::api::v2::DiscoveryRequest) expected_request; + API_NO_BOOST(envoy::service::discovery::v3::DiscoveryRequest) expected_request; if (expect_node) { - 
expected_request.mutable_node()->CopyFrom(API_DOWNGRADE(node_)); + expected_request.mutable_node()->CopyFrom(node_); } for (const auto& cluster : cluster_names) { expected_request.add_resource_names(cluster); diff --git a/test/common/config/http_subscription_test_harness.h b/test/common/config/http_subscription_test_harness.h index b168ea3ab436b..027a65d077e01 100644 --- a/test/common/config/http_subscription_test_harness.h +++ b/test/common/config/http_subscription_test_harness.h @@ -41,7 +41,7 @@ class HttpSubscriptionTestHarness : public SubscriptionTestHarness { HttpSubscriptionTestHarness(std::chrono::milliseconds init_fetch_timeout) : method_descriptor_(Protobuf::DescriptorPool::generated_pool()->FindMethodByName( - "envoy.api.v2.EndpointDiscoveryService.FetchEndpoints")), + "envoy.service.endpoint.v3.EndpointDiscoveryService.FetchEndpoints")), timer_(new Event::MockTimer()), http_request_(&cm_.thread_local_cluster_.async_client_) { node_.set_id("fo0"); EXPECT_CALL(local_info_, node()).WillOnce(testing::ReturnRef(node_)); @@ -54,8 +54,8 @@ class HttpSubscriptionTestHarness : public SubscriptionTestHarness { subscription_ = std::make_unique( local_info_, cm_, "eds_cluster", dispatcher_, random_gen_, std::chrono::milliseconds(1), std::chrono::milliseconds(1000), *method_descriptor_, - Config::TypeUrl::get().ClusterLoadAssignment, envoy::config::core::v3::ApiVersion::AUTO, - callbacks_, resource_decoder_, stats_, init_fetch_timeout, validation_visitor_); + Config::TypeUrl::get().ClusterLoadAssignment, callbacks_, resource_decoder_, stats_, + init_fetch_timeout, validation_visitor_); } ~HttpSubscriptionTestHarness() override { @@ -78,7 +78,7 @@ class HttpSubscriptionTestHarness : public SubscriptionTestHarness { EXPECT_EQ(Http::Headers::get().ContentTypeValues.Json, request->headers().getContentTypeValue()); EXPECT_EQ("eds_cluster", request->headers().getHostValue()); - EXPECT_EQ("/v2/discovery:endpoints", request->headers().getPathValue()); + 
EXPECT_EQ("/v3/discovery:endpoints", request->headers().getPathValue()); std::string expected_request = "{"; if (!version_.empty()) { expected_request += "\"version_info\":\"" + version + "\","; diff --git a/test/common/config/new_grpc_mux_impl_test.cc b/test/common/config/new_grpc_mux_impl_test.cc index ce917b39a0fc7..6fca22264265c 100644 --- a/test/common/config/new_grpc_mux_impl_test.cc +++ b/test/common/config/new_grpc_mux_impl_test.cc @@ -9,10 +9,11 @@ #include "source/common/config/new_grpc_mux_impl.h" #include "source/common/config/protobuf_link_hacks.h" #include "source/common/config/utility.h" -#include "source/common/config/version_converter.h" +#include "source/common/config/xds_mux/grpc_mux_impl.h" #include "source/common/protobuf/protobuf.h" #include "test/common/stats/stat_test_utility.h" +#include "test/config/v2_link_hacks.h" #include "test/mocks/common.h" #include "test/mocks/config/mocks.h" #include "test/mocks/event/mocks.h" @@ -41,23 +42,34 @@ namespace Envoy { namespace Config { namespace { +enum class LegacyOrUnified { Legacy, Unified }; + // We test some mux specific stuff below, other unit test coverage for singleton use of // NewGrpcMuxImpl is provided in [grpc_]subscription_impl_test.cc. 
-class NewGrpcMuxImplTestBase : public testing::Test { +class NewGrpcMuxImplTestBase : public testing::TestWithParam { public: - NewGrpcMuxImplTestBase() + NewGrpcMuxImplTestBase(LegacyOrUnified legacy_or_unified) : async_client_(new Grpc::MockAsyncClient()), control_plane_stats_(Utility::generateControlPlaneStats(stats_)), control_plane_connected_state_( - stats_.gauge("control_plane.connected_state", Stats::Gauge::ImportMode::NeverImport)) {} + stats_.gauge("control_plane.connected_state", Stats::Gauge::ImportMode::NeverImport)), + should_use_unified_(legacy_or_unified == LegacyOrUnified::Unified) {} void setup() { + if (isUnifiedMuxTest()) { + grpc_mux_ = std::make_unique( + std::unique_ptr(async_client_), dispatcher_, + *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( + "envoy.service.discovery.v2.AggregatedDiscoveryService.StreamAggregatedResources"), + envoy::config::core::v3::ApiVersion::AUTO, random_, stats_, rate_limit_settings_, + local_info_, false); + return; + } grpc_mux_ = std::make_unique( std::unique_ptr(async_client_), dispatcher_, *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( - "envoy.service.discovery.v2.AggregatedDiscoveryService.StreamAggregatedResources"), - envoy::config::core::v3::ApiVersion::AUTO, random_, stats_, rate_limit_settings_, - local_info_); + "envoy.service.discovery.v3.AggregatedDiscoveryService.StreamAggregatedResources"), + random_, stats_, rate_limit_settings_, local_info_); } void expectSendMessage(const std::string& type_url, @@ -67,8 +79,8 @@ class NewGrpcMuxImplTestBase : public testing::Test { const Protobuf::int32 error_code = Grpc::Status::WellKnownGrpcStatus::Ok, const std::string& error_message = "", const std::map& initial_resource_versions = {}) { - API_NO_BOOST(envoy::api::v2::DeltaDiscoveryRequest) expected_request; - expected_request.mutable_node()->CopyFrom(API_DOWNGRADE(local_info_.node())); + API_NO_BOOST(envoy::service::discovery::v3::DeltaDiscoveryRequest) expected_request; + 
expected_request.mutable_node()->CopyFrom(local_info_.node()); for (const auto& resource : resource_names_subscribe) { expected_request.add_resource_names_subscribe(resource); } @@ -88,12 +100,60 @@ class NewGrpcMuxImplTestBase : public testing::Test { EXPECT_CALL(async_stream_, sendMessageRaw_(Grpc::ProtoBufferEq(expected_request), false)); } + void remoteClose() { + if (isUnifiedMuxTest()) { + dynamic_cast(grpc_mux_.get()) + ->grpcStreamForTest() + .onRemoteClose(Grpc::Status::WellKnownGrpcStatus::Canceled, ""); + return; + } + dynamic_cast(grpc_mux_.get()) + ->grpcStreamForTest() + .onRemoteClose(Grpc::Status::WellKnownGrpcStatus::Canceled, ""); + } + + void onDiscoveryResponse( + std::unique_ptr&& response) { + if (isUnifiedMuxTest()) { + dynamic_cast(grpc_mux_.get()) + ->onDiscoveryResponse(std::move(response), control_plane_stats_); + return; + } + dynamic_cast(grpc_mux_.get()) + ->onDiscoveryResponse(std::move(response), control_plane_stats_); + } + + void shutdownMux() { + if (isUnifiedMuxTest()) { + dynamic_cast(grpc_mux_.get())->shutdown(); + return; + } + dynamic_cast(grpc_mux_.get())->shutdown(); + } + + // the code is duplicated here, but all calls other than the check in return statement, return + // different types. 
+ bool subscriptionExists(const std::string& type_url) const { + if (isUnifiedMuxTest()) { + auto* mux = dynamic_cast(grpc_mux_.get()); + auto& subscriptions = mux->subscriptions(); + auto sub = subscriptions.find(type_url); + return sub != subscriptions.end(); + } + auto* mux = dynamic_cast(grpc_mux_.get()); + auto& subscriptions = mux->subscriptions(); + auto sub = subscriptions.find(type_url); + return sub != subscriptions.end(); + } + + bool isUnifiedMuxTest() const { return should_use_unified_; } + NiceMock dispatcher_; NiceMock random_; Grpc::MockAsyncClient* async_client_; NiceMock async_stream_; NiceMock local_info_; - NewGrpcMuxImplPtr grpc_mux_; + std::unique_ptr grpc_mux_; NiceMock callbacks_; TestUtility::TestOpaqueResourceDecoderImpl resource_decoder_{"cluster_name"}; @@ -101,15 +161,20 @@ class NewGrpcMuxImplTestBase : public testing::Test { Envoy::Config::RateLimitSettings rate_limit_settings_; ControlPlaneStats control_plane_stats_; Stats::Gauge& control_plane_connected_state_; + bool should_use_unified_; }; class NewGrpcMuxImplTest : public NewGrpcMuxImplTestBase { public: + NewGrpcMuxImplTest() : NewGrpcMuxImplTestBase(GetParam()) {} Event::SimulatedTimeSystem time_system_; }; +INSTANTIATE_TEST_SUITE_P(NewGrpcMuxImplTest, NewGrpcMuxImplTest, + testing::ValuesIn({LegacyOrUnified::Legacy, LegacyOrUnified::Unified})); + // Validate behavior when dynamic context parameters are updated. -TEST_F(NewGrpcMuxImplTest, DynamicContextParameters) { +TEST_P(NewGrpcMuxImplTest, DynamicContextParameters) { setup(); InSequence s; auto foo_sub = grpc_mux_->addWatch("foo", {"x", "y"}, callbacks_, resource_decoder_, {}); @@ -126,11 +191,14 @@ TEST_F(NewGrpcMuxImplTest, DynamicContextParameters) { // Update to bar type should resend Node. expectSendMessage("bar", {}, {}); local_info_.context_provider_.update_cb_handler_.runCallbacks("bar"); + expectSendMessage("foo", {}, {"x", "y"}); } // Validate cached nonces are cleared on reconnection. 
-TEST_F(NewGrpcMuxImplTest, ReconnectionResetsNonceAndAcks) { +// TODO (dmitri-d) remove this test when legacy implementations have been removed +// common mux functionality is tested in xds_grpc_mux_impl_test.cc +TEST_P(NewGrpcMuxImplTest, ReconnectionResetsNonceAndAcks) { Event::MockTimer* grpc_stream_retry_timer{new Event::MockTimer()}; Event::MockTimer* ttl_mgr_timer{new NiceMock()}; Event::TimerCb grpc_stream_retry_timer_cb; @@ -164,7 +232,7 @@ TEST_F(NewGrpcMuxImplTest, ReconnectionResetsNonceAndAcks) { add_response_resource("y", "3000", *response); // Pause EDS to allow the ACK to be cached. auto resume_eds = grpc_mux_->pause(type_url); - grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_); + onDiscoveryResponse(std::move(response)); // Now disconnect. // Grpc stream retry timer will kick in and reconnection will happen. EXPECT_CALL(*grpc_stream_retry_timer, enableTimer(_, _)) @@ -173,14 +241,14 @@ TEST_F(NewGrpcMuxImplTest, ReconnectionResetsNonceAndAcks) { // initial_resource_versions should contain client side all resource:version info. expectSendMessage(type_url, {"x", "y"}, {}, "", Grpc::Status::WellKnownGrpcStatus::Ok, "", {{"x", "2000"}, {"y", "3000"}}); - grpc_mux_->grpcStreamForTest().onRemoteClose(Grpc::Status::WellKnownGrpcStatus::Canceled, ""); - // Destruction of the EDS subscription will issue an "unsubscribe" request. + remoteClose(); + expectSendMessage(type_url, {}, {"x", "y"}); } // Validate resources are not sent on wildcard watch reconnection. // Regression test of https://github.com/envoyproxy/envoy/issues/16063. 
-TEST_F(NewGrpcMuxImplTest, ReconnectionResetsWildcardSubscription) { +TEST_P(NewGrpcMuxImplTest, ReconnectionResetsWildcardSubscription) { Event::MockTimer* grpc_stream_retry_timer{new Event::MockTimer()}; Event::MockTimer* ttl_mgr_timer{new NiceMock()}; Event::TimerCb grpc_stream_retry_timer_cb; @@ -231,7 +299,7 @@ TEST_F(NewGrpcMuxImplTest, ReconnectionResetsWildcardSubscription) { })); // Expect an ack with the nonce. expectSendMessage(type_url, {}, {}, "111"); - grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_); + onDiscoveryResponse(std::move(response)); } // Send another response with a different resource, but where EDS is paused. auto resume_eds = grpc_mux_->pause(type_url); @@ -246,7 +314,7 @@ TEST_F(NewGrpcMuxImplTest, ReconnectionResetsWildcardSubscription) { TestUtility::protoEqual(added_resources[0].get().resource(), load_assignment)); })); // No ack reply is expected in this case, as EDS is suspended. - grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_); + onDiscoveryResponse(std::move(response)); } // Now disconnect. @@ -258,12 +326,12 @@ TEST_F(NewGrpcMuxImplTest, ReconnectionResetsWildcardSubscription) { // added resources because this is a wildcard request. expectSendMessage(type_url, {}, {}, "", Grpc::Status::WellKnownGrpcStatus::Ok, "", {{"x", "1000"}, {"y", "2000"}}); - grpc_mux_->grpcStreamForTest().onRemoteClose(Grpc::Status::WellKnownGrpcStatus::Canceled, ""); + remoteClose(); // Destruction of wildcard will not issue unsubscribe requests for the resources. } // Test that we simply ignore a message for an unknown type_url, with no ill effects. 
-TEST_F(NewGrpcMuxImplTest, DiscoveryResponseNonexistentSub) { +TEST_P(NewGrpcMuxImplTest, DiscoveryResponseNonexistentSub) { setup(); const std::string& type_url = Config::TypeUrl::get().ClusterLoadAssignment; @@ -279,7 +347,7 @@ TEST_F(NewGrpcMuxImplTest, DiscoveryResponseNonexistentSub) { unexpected_response->set_system_version_info("0"); // empty response should call onConfigUpdate on wildcard watch EXPECT_CALL(callbacks_, onConfigUpdate(_, _, "0")); - grpc_mux_->onDiscoveryResponse(std::move(unexpected_response), control_plane_stats_); + onDiscoveryResponse(std::move(unexpected_response)); } { auto response = std::make_unique(); @@ -296,13 +364,13 @@ TEST_F(NewGrpcMuxImplTest, DiscoveryResponseNonexistentSub) { EXPECT_TRUE( TestUtility::protoEqual(added_resources[0].get().resource(), load_assignment)); })); - grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_); + onDiscoveryResponse(std::move(response)); } } // DeltaDiscoveryResponse that comes in response to an on-demand request updates the watch with // resource's name. The watch is initially created with an alias used in the on-demand request. 
-TEST_F(NewGrpcMuxImplTest, ConfigUpdateWithAliases) { +TEST_P(NewGrpcMuxImplTest, ConfigUpdateWithAliases) { setup(); const std::string& type_url = Config::TypeUrl::get().VirtualHost; @@ -329,20 +397,17 @@ TEST_F(NewGrpcMuxImplTest, ConfigUpdateWithAliases) { response->mutable_resources()->at(0).add_aliases("prefix/domain2.test"); EXPECT_LOG_CONTAINS("debug", "for " + type_url + " from HAL 9000", - grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_)); - - const auto& subscriptions = grpc_mux_->subscriptions(); - auto sub = subscriptions.find(type_url); - - EXPECT_TRUE(sub != subscriptions.end()); + onDiscoveryResponse(std::move(response))); + EXPECT_TRUE(subscriptionExists(type_url)); watch->update({}); + EXPECT_EQ("HAL 9000", stats_.textReadout("control_plane.identifier").value()); } // DeltaDiscoveryResponse that comes in response to an on-demand request that couldn't be resolved // will contain an empty Resource. The Resource's aliases field will be populated with the alias // originally used in the request. -TEST_F(NewGrpcMuxImplTest, ConfigUpdateWithNotFoundResponse) { +TEST_P(NewGrpcMuxImplTest, ConfigUpdateWithNotFoundResponse) { setup(); const std::string& type_url = Config::TypeUrl::get().VirtualHost; @@ -363,7 +428,7 @@ TEST_F(NewGrpcMuxImplTest, ConfigUpdateWithNotFoundResponse) { } // Validate basic gRPC mux subscriptions to xdstp:// glob collections. -TEST_F(NewGrpcMuxImplTest, XdsTpGlobCollection) { +TEST_P(NewGrpcMuxImplTest, XdsTpGlobCollection) { setup(); const std::string& type_url = Config::TypeUrl::get().ClusterLoadAssignment; @@ -398,11 +463,11 @@ TEST_F(NewGrpcMuxImplTest, XdsTpGlobCollection) { EXPECT_EQ(1, added_resources.size()); EXPECT_TRUE(TestUtility::protoEqual(added_resources[0].get().resource(), load_assignment)); })); - grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_); + onDiscoveryResponse(std::move(response)); } // Validate basic gRPC mux subscriptions to xdstp:// singletons. 
-TEST_F(NewGrpcMuxImplTest, XdsTpSingleton) { +TEST_P(NewGrpcMuxImplTest, XdsTpSingleton) { setup(); const std::string& type_url = Config::TypeUrl::get().ClusterLoadAssignment; @@ -455,7 +520,35 @@ TEST_F(NewGrpcMuxImplTest, XdsTpSingleton) { EXPECT_TRUE(TestUtility::protoEqual(added_resources[1].get().resource(), load_assignment)); EXPECT_TRUE(TestUtility::protoEqual(added_resources[2].get().resource(), load_assignment)); })); - grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_); + onDiscoveryResponse(std::move(response)); +} + +TEST_P(NewGrpcMuxImplTest, RequestOnDemandUpdate) { + setup(); + + auto foo_sub = grpc_mux_->addWatch("foo", {"x", "y"}, callbacks_, resource_decoder_, {}); + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + expectSendMessage("foo", {"x", "y"}, {}); + grpc_mux_->start(); + + expectSendMessage("foo", {"z"}, {}); + grpc_mux_->requestOnDemandUpdate("foo", {"z"}); + + expectSendMessage("foo", {}, {"x", "y"}); +} + +TEST_P(NewGrpcMuxImplTest, Shutdown) { + setup(); + InSequence s; + auto foo_sub = grpc_mux_->addWatch("foo", {"x", "y"}, callbacks_, resource_decoder_, {}); + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + expectSendMessage("foo", {"x", "y"}, {}); + grpc_mux_->start(); + + shutdownMux(); + auto bar_sub = grpc_mux_->addWatch("bar", {"z"}, callbacks_, resource_decoder_, {}); + // We do not expect any messages to be sent here as the mux has been shutdown + // There won't be any unsubscribe messages for the legacy mux either for the same reason } } // namespace diff --git a/test/common/config/opaque_resource_decoder_impl_test.cc b/test/common/config/opaque_resource_decoder_impl_test.cc index f36e20f614177..332d436b32822 100644 --- a/test/common/config/opaque_resource_decoder_impl_test.cc +++ b/test/common/config/opaque_resource_decoder_impl_test.cc @@ -68,30 +68,6 @@ TEST_F(OpaqueResourceDecoderImplTest, ValidateIgnored) { 
EXPECT_EQ("fare", resource_decoder_.resourceName(*decoded_resource)); } -// Handling of smuggled deprecated fields during Any conversion. -TEST_F(OpaqueResourceDecoderImplTest, HiddenEnvoyDeprecatedFields) { - // This test is only valid in API-v3, and should be updated for API-v4, as - // the deprecated fields of API-v2 will be removed. - envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment = - TestUtility::parseYaml(R"EOF( - cluster_name: fare - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: 1.2.3.4 - port_value: 80 - policy: - overprovisioning_factor: 100 - hidden_envoy_deprecated_disable_overprovisioning: true - )EOF"); - EXPECT_THROW_WITH_REGEX(decodeTypedResource(cluster_load_assignment), ProtoValidationException, - "Illegal use of hidden_envoy_deprecated_ V2 field " - "'envoy.config.endpoint.v3.ClusterLoadAssignment.Policy.hidden_envoy_" - "deprecated_disable_overprovisioning'"); -} - // Happy path. TEST_F(OpaqueResourceDecoderImplTest, Success) { envoy::config::endpoint::v3::ClusterLoadAssignment cluster_resource; diff --git a/test/common/config/sotw_subscription_state_test.cc b/test/common/config/sotw_subscription_state_test.cc index b7d5b5bae581d..8347d511fae62 100644 --- a/test/common/config/sotw_subscription_state_test.cc +++ b/test/common/config/sotw_subscription_state_test.cc @@ -28,9 +28,8 @@ class SotwSubscriptionStateTest : public testing::Test { SotwSubscriptionStateTest() : resource_decoder_("cluster_name") { ttl_timer_ = new Event::MockTimer(&dispatcher_); state_ = std::make_unique( - Config::getTypeUrl( - envoy::config::core::v3::ApiVersion::V3), - callbacks_, dispatcher_, resource_decoder_); + Config::getTypeUrl(), callbacks_, + dispatcher_, resource_decoder_); state_->updateSubscriptionInterest({"name1", "name2", "name3"}, {}); auto cur_request = getNextDiscoveryRequestAckless(); EXPECT_THAT(cur_request->resource_names(), UnorderedElementsAre("name1", "name2", "name3")); @@ -71,8 +70,7 
@@ class SotwSubscriptionStateTest : public testing::Test { envoy::service::discovery::v3::DiscoveryResponse response; response.set_version_info(version_info); response.set_nonce(nonce); - response.set_type_url(Config::getTypeUrl( - envoy::config::core::v3::ApiVersion::V3)); + response.set_type_url(Config::getTypeUrl()); for (const auto& resource_name : resource_names) { response.add_resources()->PackFrom(resource(resource_name)); } @@ -87,8 +85,7 @@ class SotwSubscriptionStateTest : public testing::Test { envoy::service::discovery::v3::DiscoveryResponse response; response.set_version_info(version_info); response.set_nonce(nonce); - response.set_type_url(Config::getTypeUrl( - envoy::config::core::v3::ApiVersion::V3)); + response.set_type_url(Config::getTypeUrl()); response.add_resources()->PackFrom(resource); EXPECT_CALL(callbacks_, onConfigUpdate(_, version_info)); return state_->handleResponse(response); diff --git a/test/common/config/subscription_factory_impl_test.cc b/test/common/config/subscription_factory_impl_test.cc index 465ea24496245..48c263af84c80 100644 --- a/test/common/config/subscription_factory_impl_test.cc +++ b/test/common/config/subscription_factory_impl_test.cc @@ -11,6 +11,7 @@ #include "source/common/config/subscription_factory_impl.h" #include "source/common/config/xds_resource.h" +#include "test/config/v2_link_hacks.h" #include "test/mocks/config/mocks.h" #include "test/mocks/event/mocks.h" #include "test/mocks/filesystem/mocks.h" @@ -205,7 +206,7 @@ TEST_F(SubscriptionFactoryTest, FilesystemSubscriptionNonExistentFile) { envoy::config::core::v3::ConfigSource config; config.set_path("/blahblah"); EXPECT_THROW_WITH_MESSAGE(subscriptionFromConfigSource(config)->start({"foo"}), EnvoyException, - "envoy::api::v2::Path must refer to an existing path in the system: " + "paths must refer to an existing path in the system: " "'/blahblah' does not exist") } @@ -223,7 +224,7 @@ TEST_F(SubscriptionFactoryTest, FilesystemCollectionSubscription) { 
TEST_F(SubscriptionFactoryTest, FilesystemCollectionSubscriptionNonExistentFile) { EXPECT_THROW_WITH_MESSAGE(collectionSubscriptionFromUrl("file:///blahblah", {})->start({}), EnvoyException, - "envoy::api::v2::Path must refer to an existing path in the system: " + "paths must refer to an existing path in the system: " "'/blahblah' does not exist"); } diff --git a/test/common/config/type_to_endpoint_test.cc b/test/common/config/type_to_endpoint_test.cc index 1dd24c0c03083..0a1d877bb6131 100644 --- a/test/common/config/type_to_endpoint_test.cc +++ b/test/common/config/type_to_endpoint_test.cc @@ -3,6 +3,8 @@ #include "source/common/config/type_to_endpoint.h" +#include "test/config/v2_link_hacks.h" + #include "gtest/gtest.h" // API_NO_BOOST_FILE @@ -18,81 +20,30 @@ TEST(TypeToEndpoint, All) { envoy::service::route::v3::RdsDummy _v3_rds_dummy; // Delta gRPC endpoints. - EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.DeltaRoutes", - deltaGrpcMethod("type.googleapis.com/envoy.api.v2.RouteConfiguration", - envoy::config::core::v3::ApiVersion::AUTO) - .full_name()); - EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.DeltaRoutes", - deltaGrpcMethod("type.googleapis.com/envoy.api.v2.RouteConfiguration", - envoy::config::core::v3::ApiVersion::V2) - .full_name()); EXPECT_EQ("envoy.service.route.v3.RouteDiscoveryService.DeltaRoutes", - deltaGrpcMethod("type.googleapis.com/envoy.api.v2.RouteConfiguration", - envoy::config::core::v3::ApiVersion::V3) - .full_name()); - - EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.DeltaRoutes", deltaGrpcMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", envoy::config::core::v3::ApiVersion::AUTO) .full_name()); - EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.DeltaRoutes", - deltaGrpcMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", - envoy::config::core::v3::ApiVersion::V2) - .full_name()); EXPECT_EQ("envoy.service.route.v3.RouteDiscoveryService.DeltaRoutes", 
deltaGrpcMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", envoy::config::core::v3::ApiVersion::V3) .full_name()); // SotW gRPC endpoints. - EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.StreamRoutes", - sotwGrpcMethod("type.googleapis.com/envoy.api.v2.RouteConfiguration", - envoy::config::core::v3::ApiVersion::AUTO) - .full_name()); - EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.StreamRoutes", - sotwGrpcMethod("type.googleapis.com/envoy.api.v2.RouteConfiguration", - envoy::config::core::v3::ApiVersion::V2) - .full_name()); EXPECT_EQ("envoy.service.route.v3.RouteDiscoveryService.StreamRoutes", - sotwGrpcMethod("type.googleapis.com/envoy.api.v2.RouteConfiguration", - envoy::config::core::v3::ApiVersion::V3) - .full_name()); - - EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.StreamRoutes", sotwGrpcMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", envoy::config::core::v3::ApiVersion::AUTO) .full_name()); - EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.StreamRoutes", - sotwGrpcMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", - envoy::config::core::v3::ApiVersion::V2) - .full_name()); EXPECT_EQ("envoy.service.route.v3.RouteDiscoveryService.StreamRoutes", sotwGrpcMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", envoy::config::core::v3::ApiVersion::V3) .full_name()); // REST endpoints. 
- EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.FetchRoutes", - restMethod("type.googleapis.com/envoy.api.v2.RouteConfiguration", - envoy::config::core::v3::ApiVersion::AUTO) - .full_name()); - EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.FetchRoutes", - restMethod("type.googleapis.com/envoy.api.v2.RouteConfiguration", - envoy::config::core::v3::ApiVersion::V2) - .full_name()); EXPECT_EQ("envoy.service.route.v3.RouteDiscoveryService.FetchRoutes", - restMethod("type.googleapis.com/envoy.api.v2.RouteConfiguration", - envoy::config::core::v3::ApiVersion::V3) - .full_name()); - - EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.FetchRoutes", restMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", envoy::config::core::v3::ApiVersion::AUTO) .full_name()); - EXPECT_EQ("envoy.api.v2.RouteDiscoveryService.FetchRoutes", - restMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", - envoy::config::core::v3::ApiVersion::V2) - .full_name()); EXPECT_EQ("envoy.service.route.v3.RouteDiscoveryService.FetchRoutes", restMethod("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", envoy::config::core::v3::ApiVersion::V3) diff --git a/test/common/config/utility_test.cc b/test/common/config/utility_test.cc index adc3c22f24e0a..95bf82487922e 100644 --- a/test/common/config/utility_test.cc +++ b/test/common/config/utility_test.cc @@ -110,7 +110,7 @@ TEST(UtilityTest, CheckFilesystemSubscriptionBackingPath) { EXPECT_THROW_WITH_MESSAGE( Utility::checkFilesystemSubscriptionBackingPath("foo", *api), EnvoyException, - "envoy::api::v2::Path must refer to an existing path in the system: 'foo' does not exist"); + "paths must refer to an existing path in the system: 'foo' does not exist"); std::string test_path = TestEnvironment::temporaryDirectory(); Utility::checkFilesystemSubscriptionBackingPath(test_path, *api); } @@ -287,8 +287,8 @@ TEST(UtilityTest, AnyWrongType) { typed_config.PackFrom(source_duration); ProtobufWkt::Timestamp out; 
EXPECT_THROW_WITH_REGEX( - Utility::translateOpaqueConfig(typed_config, ProtobufWkt::Struct(), - ProtobufMessage::getStrictValidationVisitor(), out), + Utility::translateOpaqueConfig(typed_config, ProtobufMessage::getStrictValidationVisitor(), + out), EnvoyException, R"(Unable to unpack as google.protobuf.Timestamp: \[type.googleapis.com/google.protobuf.Duration\] .*)"); } @@ -344,64 +344,11 @@ TEST(UtilityTest, TypedStructToStruct) { packTypedStructIntoAny(typed_config, untyped_struct); ProtobufWkt::Struct out; - Utility::translateOpaqueConfig(typed_config, ProtobufWkt::Struct(), - ProtobufMessage::getStrictValidationVisitor(), out); + Utility::translateOpaqueConfig(typed_config, ProtobufMessage::getStrictValidationVisitor(), out); EXPECT_THAT(out, ProtoEq(untyped_struct)); } -// Verify that regular Struct can be translated into an arbitrary message of correct type -// (v2 API, no upgrading). -TEST(UtilityTest, StructToClusterV2) { - ProtobufWkt::Any typed_config; - API_NO_BOOST(envoy::api::v2::Cluster) cluster; - ProtobufWkt::Struct cluster_struct; - const std::string cluster_config_yaml = R"EOF( - drain_connections_on_host_removal: true - )EOF"; - TestUtility::loadFromYaml(cluster_config_yaml, cluster); - TestUtility::loadFromYaml(cluster_config_yaml, cluster_struct); - - { - API_NO_BOOST(envoy::api::v2::Cluster) out; - Utility::translateOpaqueConfig({}, cluster_struct, ProtobufMessage::getNullValidationVisitor(), - out); - EXPECT_THAT(out, ProtoEq(cluster)); - } - { - API_NO_BOOST(envoy::api::v2::Cluster) out; - Utility::translateOpaqueConfig({}, cluster_struct, - ProtobufMessage::getStrictValidationVisitor(), out); - EXPECT_THAT(out, ProtoEq(cluster)); - } -} - -// Verify that regular Struct can be translated into an arbitrary message of correct type -// (v3 API, upgrading). 
-TEST(UtilityTest, StructToClusterV3) { - ProtobufWkt::Any typed_config; - API_NO_BOOST(envoy::config::cluster::v3::Cluster) cluster; - ProtobufWkt::Struct cluster_struct; - const std::string cluster_config_yaml = R"EOF( - ignore_health_on_host_removal: true - )EOF"; - TestUtility::loadFromYaml(cluster_config_yaml, cluster); - TestUtility::loadFromYaml(cluster_config_yaml, cluster_struct); - - { - API_NO_BOOST(envoy::config::cluster::v3::Cluster) out; - Utility::translateOpaqueConfig({}, cluster_struct, ProtobufMessage::getNullValidationVisitor(), - out); - EXPECT_THAT(out, ProtoEq(cluster)); - } - { - API_NO_BOOST(envoy::config::cluster::v3::Cluster) out; - Utility::translateOpaqueConfig({}, cluster_struct, - ProtobufMessage::getStrictValidationVisitor(), out); - EXPECT_THAT(out, ProtoEq(cluster)); - } -} - // Verify that udpa.type.v1.TypedStruct can be translated into an arbitrary message of correct type // (v2 API, no upgrading). TEST(UtilityTest, TypedStructToClusterV2) { @@ -415,14 +362,13 @@ TEST(UtilityTest, TypedStructToClusterV2) { { API_NO_BOOST(envoy::api::v2::Cluster) out; - Utility::translateOpaqueConfig(typed_config, ProtobufWkt::Struct(), - ProtobufMessage::getNullValidationVisitor(), out); + Utility::translateOpaqueConfig(typed_config, ProtobufMessage::getNullValidationVisitor(), out); EXPECT_THAT(out, ProtoEq(cluster)); } { API_NO_BOOST(envoy::api::v2::Cluster) out; - Utility::translateOpaqueConfig(typed_config, ProtobufWkt::Struct(), - ProtobufMessage::getStrictValidationVisitor(), out); + Utility::translateOpaqueConfig(typed_config, ProtobufMessage::getStrictValidationVisitor(), + out); EXPECT_THAT(out, ProtoEq(cluster)); } } @@ -440,14 +386,13 @@ TEST(UtilityTest, TypedStructToClusterV3) { { API_NO_BOOST(envoy::config::cluster::v3::Cluster) out; - Utility::translateOpaqueConfig(typed_config, ProtobufWkt::Struct(), - ProtobufMessage::getNullValidationVisitor(), out); + Utility::translateOpaqueConfig(typed_config, 
ProtobufMessage::getNullValidationVisitor(), out); EXPECT_THAT(out, ProtoEq(cluster)); } { API_NO_BOOST(envoy::config::cluster::v3::Cluster) out; - Utility::translateOpaqueConfig(typed_config, ProtobufWkt::Struct(), - ProtobufMessage::getStrictValidationVisitor(), out); + Utility::translateOpaqueConfig(typed_config, ProtobufMessage::getStrictValidationVisitor(), + out); EXPECT_THAT(out, ProtoEq(cluster)); } } @@ -464,8 +409,7 @@ TEST(UtilityTest, AnyToClusterV2) { typed_config.PackFrom(cluster); API_NO_BOOST(envoy::api::v2::Cluster) out; - Utility::translateOpaqueConfig(typed_config, ProtobufWkt::Struct(), - ProtobufMessage::getStrictValidationVisitor(), out); + Utility::translateOpaqueConfig(typed_config, ProtobufMessage::getStrictValidationVisitor(), out); EXPECT_THAT(out, ProtoEq(cluster)); } @@ -481,8 +425,7 @@ TEST(UtilityTest, AnyToClusterV3) { typed_config.PackFrom(cluster); API_NO_BOOST(envoy::config::cluster::v3::Cluster) out; - Utility::translateOpaqueConfig(typed_config, ProtobufWkt::Struct(), - ProtobufMessage::getStrictValidationVisitor(), out); + Utility::translateOpaqueConfig(typed_config, ProtobufMessage::getStrictValidationVisitor(), out); EXPECT_THAT(out, ProtoEq(cluster)); } @@ -505,10 +448,9 @@ TEST(UtilityTest, TypedStructToInvalidType) { packTypedStructIntoAny(typed_config, bootstrap); ProtobufWkt::Any out; - EXPECT_THROW_WITH_REGEX( - Utility::translateOpaqueConfig(typed_config, ProtobufWkt::Struct(), - ProtobufMessage::getStrictValidationVisitor(), out), - EnvoyException, "Unable to parse JSON as proto"); + EXPECT_THROW_WITH_REGEX(Utility::translateOpaqueConfig( + typed_config, ProtobufMessage::getStrictValidationVisitor(), out), + EnvoyException, "Unable to parse JSON as proto"); } // Verify that ProtobufWkt::Empty can load into a typed factory with an empty config proto @@ -518,8 +460,7 @@ TEST(UtilityTest, EmptyToEmptyConfig) { typed_config.PackFrom(empty_config); envoy::extensions::filters::http::cors::v3::Cors out; - 
Utility::translateOpaqueConfig(typed_config, ProtobufWkt::Struct(), - ProtobufMessage::getStrictValidationVisitor(), out); + Utility::translateOpaqueConfig(typed_config, ProtobufMessage::getStrictValidationVisitor(), out); EXPECT_THAT(out, ProtoEq(envoy::extensions::filters::http::cors::v3::Cors())); } diff --git a/test/common/config/version_converter.proto b/test/common/config/version_converter.proto deleted file mode 100644 index d58377665580b..0000000000000 --- a/test/common/config/version_converter.proto +++ /dev/null @@ -1,78 +0,0 @@ -syntax = "proto3"; - -package test.common.config; - -import "google/protobuf/any.proto"; - -enum PreviousEnum { - PREV_DEFAULT = 0; - PREV_DEPRECATED_VALUE = 1; - PREV_OTHER_VALUE = 2; -} - -enum NextEnum { - NEXT_DEFAULT = 0; - reserved 1; - NEXT_OTHER_VALUE = 2; -} - -message PreviousVersion { - // Singleton scalars. - string string_field = 1; - bytes bytes_field = 2; - int32 int32_field = 3; - int64 int64_field = 4; - uint32 uint32_field = 5; - uint64 uint64_field = 6; - double double_field = 7; - float float_field = 8; - bool bool_field = 9; - PreviousEnum enum_field = 10; - - // Singleton nested message. - message PreviousVersionNested { - google.protobuf.Any any_field = 1; - } - PreviousVersionNested nested_field = 11; - - // Repeated entities. - repeated string repeated_scalar_field = 12; - repeated PreviousVersionNested repeated_nested_field = 13; - - // Deprecations. - uint32 deprecated_field = 14 [deprecated = true]; - PreviousEnum enum_field_with_deprecated_value = 15; -} - -message NextVersion { - // Singleton scalars. - string string_field = 1; - bytes bytes_field = 2; - int32 int32_field = 3; - int64 int64_field = 4; - uint32 uint32_field = 5; - uint64 uint64_field = 6; - double double_field = 7; - float float_field = 8; - bool bool_field = 9; - PreviousEnum enum_field = 10; - - // Singleton nested message. 
- message NextVersionNested { - google.protobuf.Any any_field = 1; - } - NextVersionNested nested_field = 11; - - // Repeated entities. - repeated string repeated_scalar_field = 12; - repeated NextVersionNested repeated_nested_field = 13; - - // Deprecations. - reserved 14; - NextEnum enum_field_with_deprecated_value = 15; - message NewMessageInThisVerion { - } - - // New message present in this version but not PreviousVersion. - NewMessageInThisVerion new_message_in_this_version = 16; -} diff --git a/test/common/config/version_converter_test.cc b/test/common/config/version_converter_test.cc deleted file mode 100644 index 0b29da956e0b3..0000000000000 --- a/test/common/config/version_converter_test.cc +++ /dev/null @@ -1,199 +0,0 @@ -#include "envoy/api/v2/cluster.pb.h" -#include "envoy/api/v2/discovery.pb.h" -#include "envoy/config/cluster/v3/cluster.pb.h" -#include "envoy/service/discovery/v3/discovery.pb.h" - -#include "source/common/config/api_version.h" -#include "source/common/config/version_converter.h" -#include "source/common/protobuf/well_known.h" - -#include "test/common/config/version_converter.pb.h" -#include "test/test_common/utility.h" - -#include "gtest/gtest.h" - -namespace Envoy { -namespace Config { -namespace { - -bool hasOriginalTypeInformation(const Protobuf::Message& message) { - const Protobuf::Reflection* reflection = message.GetReflection(); - const auto& unknown_field_set = reflection->GetUnknownFields(message); - for (int i = 0; i < unknown_field_set.field_count(); ++i) { - const auto& unknown_field = unknown_field_set.field(i); - if (unknown_field.number() == ProtobufWellKnown::OriginalTypeFieldNumber) { - return true; - } - } - return false; -} - -// Wire-style upgrading between versions. -TEST(VersionConverterTest, Upgrade) { - // Create a v2 Cluster message with some fields set. 
- API_NO_BOOST(envoy::api::v2::Cluster) source; - source.mutable_load_assignment()->set_cluster_name("bar"); - source.mutable_eds_cluster_config()->set_service_name("foo"); - source.set_drain_connections_on_host_removal(true); - // Upgrade to a v3 Cluster. - API_NO_BOOST(envoy::config::cluster::v3::Cluster) dst; - VersionConverter::upgrade(source, dst); - // Verify fields in v3 Cluster. - EXPECT_TRUE(hasOriginalTypeInformation(dst)); - EXPECT_EQ("bar", dst.load_assignment().cluster_name()); - EXPECT_FALSE(hasOriginalTypeInformation(dst.load_assignment())); - EXPECT_EQ("foo", dst.eds_cluster_config().service_name()); - EXPECT_TRUE(hasOriginalTypeInformation(dst.eds_cluster_config())); - EXPECT_TRUE(dst.ignore_health_on_host_removal()); - // Recover a v2 Cluster from the v3 Cluster using original type information. - auto original_dynamic_msg = VersionConverter::recoverOriginal(dst); - const auto& original_msg = *original_dynamic_msg->msg_; - EXPECT_EQ("envoy.api.v2.Cluster", original_msg.GetDescriptor()->full_name()); - // Ensure that we erased any original type information and have the original - // message. - EXPECT_THAT(original_msg, ProtoEq(source)); - // Verify that sub-messages work with VersionConverter::recoverOriginal, i.e. - // we are propagating original type information. - auto original_dynamic_sub_msg = VersionConverter::recoverOriginal(dst.eds_cluster_config()); - const auto& original_sub_msg = *original_dynamic_sub_msg->msg_; - EXPECT_THAT(original_sub_msg, ProtoEq(source.eds_cluster_config())); -} - -// Empty upgrade between version_converter.proto entities. TODO(htuch): consider migrating all the -// upgrades in this test to version_converter.proto to reduce dependence on APIs that will be -// removed at `EOY`. 
-TEST(VersionConverterProto, UpgradeNextVersion) { - test::common::config::PreviousVersion source; - test::common::config::NextVersion dst; - VersionConverter::upgrade(source, dst); -} - -// Validate that even if we pass in a newer proto version that is being passed off as an older -// version (e.g. via a type URL mistake), we don't crash. This is a regression test for -// https://github.com/envoyproxy/envoy/issues/13681. -TEST(VersionConverterProto, UpgradeWithConfusedTypes) { - test::common::config::NextVersion source_next; - source_next.mutable_new_message_in_this_version(); - test::common::config::PreviousVersion source; - ASSERT_TRUE(source.ParseFromString(source_next.SerializeAsString())); - test::common::config::NextVersion dst; - VersionConverter::upgrade(source, dst); -} - -// Bad UTF-8 can fail wire cast during upgrade. -TEST(VersionConverterTest, UpgradeException) { - API_NO_BOOST(envoy::api::v2::Cluster) source; - source.mutable_eds_cluster_config()->set_service_name("UPST128\tAM_HO\001\202\247ST"); - API_NO_BOOST(envoy::config::cluster::v3::Cluster) dst; - EXPECT_THROW_WITH_MESSAGE(VersionConverter::upgrade(source, dst), EnvoyException, - "Unable to deserialize during wireCast()"); -} - -// Verify that VersionUtil::scrubHiddenEnvoyDeprecated recursively scrubs any -// deprecated fields. 
-TEST(VersionConverterTest, ScrubHiddenEnvoyDeprecated) { - API_NO_BOOST(envoy::config::cluster::v3::Cluster) msg; - msg.set_name("foo"); - msg.mutable_hidden_envoy_deprecated_tls_context(); - EXPECT_TRUE(msg.has_hidden_envoy_deprecated_tls_context()); - msg.mutable_load_balancing_policy()->add_policies()->mutable_hidden_envoy_deprecated_config(); - EXPECT_TRUE(msg.load_balancing_policy().policies(0).has_hidden_envoy_deprecated_config()); - VersionUtil::scrubHiddenEnvoyDeprecated(msg); - EXPECT_EQ("foo", msg.name()); - EXPECT_FALSE(msg.has_hidden_envoy_deprecated_tls_context()); - EXPECT_FALSE(msg.load_balancing_policy().policies(0).has_hidden_envoy_deprecated_config()); -} - -// Validate that we can sensibly provide a JSON wire interpretation of messages -// such as DiscoveryRequest based on transport API version. -TEST(VersionConverter, GetJsonStringFromMessage) { - API_NO_BOOST(envoy::service::discovery::v3::DiscoveryRequest) discovery_request; - discovery_request.mutable_node()->set_hidden_envoy_deprecated_build_version("foo"); - discovery_request.mutable_node()->set_user_agent_name("bar"); - const std::string v2_discovery_request = VersionConverter::getJsonStringFromMessage( - discovery_request, envoy::config::core::v3::ApiVersion::V2); - EXPECT_EQ("{\"node\":{\"build_version\":\"foo\",\"user_agent_name\":\"bar\"}}", - v2_discovery_request); - const std::string auto_discovery_request = VersionConverter::getJsonStringFromMessage( - discovery_request, envoy::config::core::v3::ApiVersion::AUTO); - EXPECT_EQ("{\"node\":{\"build_version\":\"foo\",\"user_agent_name\":\"bar\"}}", - auto_discovery_request); - const std::string v3_discovery_request = VersionConverter::getJsonStringFromMessage( - discovery_request, envoy::config::core::v3::ApiVersion::V3); - EXPECT_EQ("{\"node\":{\"user_agent_name\":\"bar\"}}", v3_discovery_request); -} - -bool hasUnknownFields(const Protobuf::Message& message) { - const Protobuf::Reflection* reflection = message.GetReflection(); - 
const auto& unknown_field_set = reflection->GetUnknownFields(message); - return !unknown_field_set.empty(); -} - -// Validate that we can sensibly provide a gRPC wire interpretation of messages -// such as DiscoveryRequest based on transport API version. -TEST(VersionConverter, PrepareMessageForGrpcWire) { - API_NO_BOOST(envoy::api::v2::core::Node) v2_node; - v2_node.set_build_version("foo"); - v2_node.set_user_agent_name("bar"); - API_NO_BOOST(envoy::service::discovery::v3::DiscoveryRequest) discovery_request; - discovery_request.mutable_node()->set_hidden_envoy_deprecated_build_version("foo"); - VersionConverter::upgrade(v2_node, *discovery_request.mutable_node()); - { - API_NO_BOOST(envoy::service::discovery::v3::DiscoveryRequest) discovery_request_copy; - discovery_request_copy.MergeFrom(discovery_request); - VersionConverter::prepareMessageForGrpcWire(discovery_request_copy, - envoy::config::core::v3::ApiVersion::V2); - API_NO_BOOST(envoy::api::v2::DiscoveryRequest) v2_discovery_request; - EXPECT_TRUE(v2_discovery_request.ParseFromString(discovery_request_copy.SerializeAsString())); - EXPECT_EQ("foo", v2_discovery_request.node().build_version()); - EXPECT_FALSE(hasUnknownFields(v2_discovery_request.node())); - } - { - API_NO_BOOST(envoy::service::discovery::v3::DiscoveryRequest) discovery_request_copy; - discovery_request_copy.MergeFrom(discovery_request); - VersionConverter::prepareMessageForGrpcWire(discovery_request_copy, - envoy::config::core::v3::ApiVersion::AUTO); - API_NO_BOOST(envoy::api::v2::DiscoveryRequest) auto_discovery_request; - EXPECT_TRUE(auto_discovery_request.ParseFromString(discovery_request_copy.SerializeAsString())); - EXPECT_EQ("foo", auto_discovery_request.node().build_version()); - EXPECT_FALSE(hasUnknownFields(auto_discovery_request.node())); - } - { - API_NO_BOOST(envoy::service::discovery::v3::DiscoveryRequest) discovery_request_copy; - discovery_request_copy.MergeFrom(discovery_request); - 
VersionConverter::prepareMessageForGrpcWire(discovery_request_copy, - envoy::config::core::v3::ApiVersion::V3); - API_NO_BOOST(envoy::service::discovery::v3::DiscoveryRequest) v3_discovery_request; - EXPECT_TRUE(v3_discovery_request.ParseFromString(discovery_request_copy.SerializeAsString())); - EXPECT_EQ("", v3_discovery_request.node().hidden_envoy_deprecated_build_version()); - EXPECT_FALSE(hasUnknownFields(v3_discovery_request.node())); - } -} - -// Downgrading to an earlier version (where it exists). -TEST(VersionConverterTest, DowngradeEarlier) { - API_NO_BOOST(envoy::config::cluster::v3::Cluster) source; - source.set_ignore_health_on_host_removal(true); - auto downgraded = VersionConverter::downgrade(source); - const Protobuf::Descriptor* desc = downgraded->msg_->GetDescriptor(); - const Protobuf::Reflection* reflection = downgraded->msg_->GetReflection(); - EXPECT_EQ("envoy.api.v2.Cluster", desc->full_name()); - EXPECT_EQ(true, reflection->GetBool(*downgraded->msg_, - desc->FindFieldByName("drain_connections_on_host_removal"))); -} - -// Downgrading is idempotent if no earlier version. 
-TEST(VersionConverterTest, DowngradeSame) { - API_NO_BOOST(envoy::api::v2::Cluster) source; - source.set_drain_connections_on_host_removal(true); - auto downgraded = VersionConverter::downgrade(source); - const Protobuf::Descriptor* desc = downgraded->msg_->GetDescriptor(); - const Protobuf::Reflection* reflection = downgraded->msg_->GetReflection(); - EXPECT_EQ("envoy.api.v2.Cluster", desc->full_name()); - EXPECT_EQ(true, reflection->GetBool(*downgraded->msg_, - desc->FindFieldByName("drain_connections_on_host_removal"))); -} - -} // namespace -} // namespace Config -} // namespace Envoy diff --git a/test/common/config/xds_grpc_mux_impl_test.cc b/test/common/config/xds_grpc_mux_impl_test.cc new file mode 100644 index 0000000000000..70bde3f3fe7cf --- /dev/null +++ b/test/common/config/xds_grpc_mux_impl_test.cc @@ -0,0 +1,940 @@ +#include + +#include "envoy/api/v2/discovery.pb.h" +#include "envoy/config/endpoint/v3/endpoint.pb.h" +#include "envoy/config/endpoint/v3/endpoint.pb.validate.h" +#include "envoy/event/timer.h" +#include "envoy/service/discovery/v3/discovery.pb.h" + +#include "source/common/common/empty_string.h" +#include "source/common/config/protobuf_link_hacks.h" +#include "source/common/config/resource_name.h" +#include "source/common/config/utility.h" +#include "source/common/config/xds_mux/grpc_mux_impl.h" +#include "source/common/protobuf/protobuf.h" + +#include "test/common/stats/stat_test_utility.h" +#include "test/config/v2_link_hacks.h" +#include "test/mocks/common.h" +#include "test/mocks/config/mocks.h" +#include "test/mocks/event/mocks.h" +#include "test/mocks/grpc/mocks.h" +#include "test/mocks/local_info/mocks.h" +#include "test/mocks/runtime/mocks.h" +#include "test/test_common/logging.h" +#include "test/test_common/resources.h" +#include "test/test_common/simulated_time_system.h" +#include "test/test_common/test_time.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; 
+using testing::AtLeast; +using testing::InSequence; +using testing::Invoke; +using testing::IsSubstring; +using testing::NiceMock; +using testing::Return; +using testing::ReturnRef; + +namespace Envoy { +namespace Config { +namespace XdsMux { +namespace { + +// We test some mux specific stuff below, other unit test coverage for singleton use of GrpcMuxImpl +// is provided in [grpc_]subscription_impl_test.cc. +class GrpcMuxImplTestBase : public testing::Test { +public: + GrpcMuxImplTestBase() + : async_client_(new Grpc::MockAsyncClient()), + control_plane_stats_(Utility::generateControlPlaneStats(stats_)), + control_plane_connected_state_( + stats_.gauge("control_plane.connected_state", Stats::Gauge::ImportMode::NeverImport)), + control_plane_pending_requests_(stats_.gauge("control_plane.pending_requests", + Stats::Gauge::ImportMode::NeverImport)) {} + + void setup() { + grpc_mux_ = std::make_unique( + std::unique_ptr(async_client_), dispatcher_, + *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( + "envoy.service.discovery.v2.AggregatedDiscoveryService.StreamAggregatedResources"), + envoy::config::core::v3::ApiVersion::AUTO, random_, stats_, rate_limit_settings_, + local_info_, true); + } + + void setup(const RateLimitSettings& custom_rate_limit_settings) { + grpc_mux_ = std::make_unique( + std::unique_ptr(async_client_), dispatcher_, + *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( + "envoy.service.discovery.v2.AggregatedDiscoveryService.StreamAggregatedResources"), + envoy::config::core::v3::ApiVersion::AUTO, random_, stats_, custom_rate_limit_settings, + local_info_, true); + } + + void expectSendMessage(const std::string& type_url, + const std::vector& resource_names, const std::string& version, + bool first = false, const std::string& nonce = "", + const Protobuf::int32 error_code = Grpc::Status::WellKnownGrpcStatus::Ok, + const std::string& error_message = "") { + envoy::service::discovery::v3::DiscoveryRequest expected_request; 
+ if (first) { + expected_request.mutable_node()->CopyFrom(local_info_.node()); + } + for (const auto& resource : resource_names) { + expected_request.add_resource_names(resource); + } + if (!version.empty()) { + expected_request.set_version_info(version); + } + expected_request.set_response_nonce(nonce); + expected_request.set_type_url(type_url); + if (error_code != Grpc::Status::WellKnownGrpcStatus::Ok) { + ::google::rpc::Status* error_detail = expected_request.mutable_error_detail(); + error_detail->set_code(error_code); + error_detail->set_message(error_message); + } + EXPECT_CALL( + async_stream_, + sendMessageRaw_(Grpc::ProtoBufferEqIgnoreRepeatedFieldOrdering(expected_request), false)); + } + + Config::GrpcMuxWatchPtr makeWatch(const std::string& type_url, + const absl::flat_hash_set& resources) { + return grpc_mux_->addWatch(type_url, resources, callbacks_, resource_decoder_, {}); + } + + Config::GrpcMuxWatchPtr makeWatch(const std::string& type_url, + const absl::flat_hash_set& resources, + NiceMock& callbacks, + Config::OpaqueResourceDecoder& resource_decoder) { + return grpc_mux_->addWatch(type_url, resources, callbacks, resource_decoder, {}); + } + + NiceMock dispatcher_; + NiceMock random_; + Grpc::MockAsyncClient* async_client_; + Grpc::MockAsyncStream async_stream_; + NiceMock local_info_; + std::unique_ptr grpc_mux_; + NiceMock callbacks_; + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder_{"cluster_name"}; + Stats::TestUtil::TestStore stats_; + ControlPlaneStats control_plane_stats_; + Envoy::Config::RateLimitSettings rate_limit_settings_; + Stats::Gauge& control_plane_connected_state_; + Stats::Gauge& control_plane_pending_requests_; +}; + +class GrpcMuxImplTest : public GrpcMuxImplTestBase { +public: + Event::SimulatedTimeSystem time_system_; +}; + +// Validate behavior when multiple type URL watches are maintained, watches are created/destroyed. 
+TEST_F(GrpcMuxImplTest, MultipleTypeUrlStreams) { + setup(); + InSequence s; + + auto foo_sub = makeWatch("type_url_foo", {"x", "y"}); + auto bar_sub = makeWatch("type_url_bar", {}); + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + expectSendMessage("type_url_foo", {"x", "y"}, "", true); + expectSendMessage("type_url_bar", {}, ""); + grpc_mux_->start(); + EXPECT_EQ(1, control_plane_connected_state_.value()); + expectSendMessage("type_url_bar", {"z"}, ""); + auto bar_z_sub = makeWatch("type_url_bar", {"z"}); + expectSendMessage("type_url_bar", {"zz", "z"}, ""); + auto bar_zz_sub = makeWatch("type_url_bar", {"zz"}); + expectSendMessage("type_url_bar", {"z"}, ""); + expectSendMessage("type_url_bar", {}, ""); + expectSendMessage("type_url_foo", {}, ""); +} + +// Validate behavior when multiple type URL watches are maintained and the stream is reset. +TEST_F(GrpcMuxImplTest, ResetStream) { + InSequence s; + + auto* timer = new Event::MockTimer(&dispatcher_); + // TTL timers. + new Event::MockTimer(&dispatcher_); + new Event::MockTimer(&dispatcher_); + new Event::MockTimer(&dispatcher_); + + setup(); + auto foo_sub = makeWatch("type_url_foo", {"x", "y"}); + auto bar_sub = makeWatch("type_url_bar", {}); + auto baz_sub = makeWatch("type_url_baz", {"z"}); + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + expectSendMessage("type_url_foo", {"x", "y"}, "", true); + expectSendMessage("type_url_bar", {}, ""); + expectSendMessage("type_url_baz", {"z"}, ""); + grpc_mux_->start(); + + // Send another message for foo so that the node is cleared in the cached request. + // This is to test that the the node is set again in the first message below. 
+ expectSendMessage("type_url_foo", {"z", "x", "y"}, ""); + auto foo_z_sub = makeWatch("type_url_foo", {"z"}); + + EXPECT_CALL(callbacks_, + onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, _)) + .Times(4); + EXPECT_CALL(random_, random()); + EXPECT_CALL(*timer, enableTimer(_, _)); + grpc_mux_->grpcStreamForTest().onRemoteClose(Grpc::Status::WellKnownGrpcStatus::Canceled, ""); + EXPECT_EQ(0, control_plane_connected_state_.value()); + EXPECT_EQ(0, control_plane_pending_requests_.value()); + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + expectSendMessage("type_url_foo", {"z", "x", "y"}, "", true); + expectSendMessage("type_url_bar", {}, ""); + expectSendMessage("type_url_baz", {"z"}, ""); + expectSendMessage("type_url_foo", {"x", "y"}, ""); + timer->invokeCallback(); + + expectSendMessage("type_url_baz", {}, ""); + expectSendMessage("type_url_foo", {}, ""); +} + +// Validate pause-resume behavior. +TEST_F(GrpcMuxImplTest, PauseResume) { + setup(); + InSequence s; + GrpcMuxWatchPtr foo1; + GrpcMuxWatchPtr foo2; + GrpcMuxWatchPtr foo3; + auto foo = grpc_mux_->addWatch("type_url_foo", {"x", "y"}, callbacks_, resource_decoder_, {}); + { + ScopedResume a = grpc_mux_->pause("type_url_foo"); + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + grpc_mux_->start(); + expectSendMessage("type_url_foo", {"x", "y"}, "", true); + } + { + ScopedResume a = grpc_mux_->pause("type_url_bar"); + expectSendMessage("type_url_foo", {"z", "x", "y"}, ""); + foo1 = grpc_mux_->addWatch("type_url_foo", {"z"}, callbacks_, resource_decoder_, {}); + } + { + ScopedResume a = grpc_mux_->pause("type_url_foo"); + foo2 = grpc_mux_->addWatch("type_url_foo", {"zz"}, callbacks_, resource_decoder_, {}); + expectSendMessage("type_url_foo", {"zz", "z", "x", "y"}, ""); + } + // When nesting, we only have a single resumption. 
+ { + ScopedResume a = grpc_mux_->pause("type_url_foo"); + ScopedResume b = grpc_mux_->pause("type_url_foo"); + foo3 = grpc_mux_->addWatch("type_url_foo", {"zzz"}, callbacks_, resource_decoder_, {}); + expectSendMessage("type_url_foo", {"zzz", "zz", "z", "x", "y"}, ""); + } + + grpc_mux_->pause("type_url_foo")->cancel(); +} + +// Validate behavior when type URL mismatches occur. +TEST_F(GrpcMuxImplTest, TypeUrlMismatch) { + setup(); + + auto invalid_response = std::make_unique(); + InSequence s; + auto foo_sub = makeWatch("type_url_foo", {"x", "y"}); + + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + expectSendMessage("type_url_foo", {"x", "y"}, "", true); + grpc_mux_->start(); + + { + auto response = std::make_unique(); + response->set_type_url("type_url_bar"); + response->set_version_info("bar-version"); + grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_); + } + + { + invalid_response->set_type_url("type_url_foo"); + invalid_response->set_version_info("foo-version"); + invalid_response->mutable_resources()->Add()->set_type_url("type_url_bar"); + EXPECT_CALL(callbacks_, onConfigUpdateFailed(_, _)) + .WillOnce(Invoke([](Envoy::Config::ConfigUpdateFailureReason, const EnvoyException* e) { + EXPECT_TRUE( + IsSubstring("", "", + "type URL type_url_bar embedded in an individual Any does not match the " + "message-wide type URL type_url_foo in DiscoveryResponse", + e->what())); + })); + + expectSendMessage( + "type_url_foo", {"x", "y"}, "", false, "", Grpc::Status::WellKnownGrpcStatus::Internal, + fmt::format("type URL type_url_bar embedded in an individual Any does not match the " + "message-wide type URL type_url_foo in DiscoveryResponse {}", + invalid_response->DebugString())); + grpc_mux_->onDiscoveryResponse(std::move(invalid_response), control_plane_stats_); + } + expectSendMessage("type_url_foo", {}, ""); +} + +TEST_F(GrpcMuxImplTest, RpcErrorMessageTruncated) { + setup(); + auto invalid_response = 
std::make_unique(); + InSequence s; + auto foo_sub = makeWatch("type_url_foo", {"x", "y"}); + + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + expectSendMessage("type_url_foo", {"x", "y"}, "", true); + grpc_mux_->start(); + + { // Large error message sent back to management server is truncated. + const std::string very_large_type_url(1 << 20, 'A'); + invalid_response->set_type_url("type_url_foo"); + invalid_response->set_version_info("invalid"); + invalid_response->mutable_resources()->Add()->set_type_url(very_large_type_url); + EXPECT_CALL(callbacks_, onConfigUpdateFailed(_, _)) + .WillOnce(Invoke([&very_large_type_url](Envoy::Config::ConfigUpdateFailureReason, + const EnvoyException* e) { + EXPECT_TRUE( + IsSubstring("", "", + fmt::format("type URL {} embedded in an individual Any does not match " + "the message-wide type URL type_url_foo in DiscoveryResponse", + very_large_type_url), // Local error message is not truncated. + e->what())); + })); + expectSendMessage("type_url_foo", {"x", "y"}, "", false, "", + Grpc::Status::WellKnownGrpcStatus::Internal, + fmt::format("type URL {}...(truncated)", std::string(4087, 'A'))); + grpc_mux_->onDiscoveryResponse(std::move(invalid_response), control_plane_stats_); + } + expectSendMessage("type_url_foo", {}, ""); +} + +envoy::service::discovery::v3::Resource heartbeatResource(std::chrono::milliseconds ttl, + const std::string& name) { + envoy::service::discovery::v3::Resource resource; + + resource.mutable_ttl()->CopyFrom(Protobuf::util::TimeUtil::MillisecondsToDuration(ttl.count())); + resource.set_name(name); + + return resource; +} + +envoy::service::discovery::v3::Resource +resourceWithTtl(std::chrono::milliseconds ttl, + envoy::config::endpoint::v3::ClusterLoadAssignment& cla) { + envoy::service::discovery::v3::Resource resource; + resource.mutable_resource()->PackFrom(cla); + resource.mutable_ttl()->CopyFrom(Protobuf::util::TimeUtil::MillisecondsToDuration(ttl.count())); + + 
resource.set_name(cla.cluster_name()); + + return resource; +} +envoy::service::discovery::v3::Resource +resourceWithEmptyTtl(envoy::config::endpoint::v3::ClusterLoadAssignment& cla) { + envoy::service::discovery::v3::Resource resource; + resource.mutable_resource()->PackFrom(cla); + resource.set_name(cla.cluster_name()); + return resource; +} +// Validates the behavior when the TTL timer expires. +TEST_F(GrpcMuxImplTest, ResourceTTL) { + setup(); + + time_system_.setSystemTime(std::chrono::seconds(0)); + + TestUtility::TestOpaqueResourceDecoderImpl + resource_decoder("cluster_name"); + const std::string& type_url = Config::TypeUrl::get().ClusterLoadAssignment; + InSequence s; + auto* ttl_timer = new Event::MockTimer(&dispatcher_); + auto eds_sub = makeWatch(type_url, {"x"}, callbacks_, resource_decoder); + + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + expectSendMessage(type_url, {"x"}, "", true); + grpc_mux_->start(); + + { + auto response = std::make_unique(); + response->set_type_url(type_url); + response->set_version_info("1"); + envoy::config::endpoint::v3::ClusterLoadAssignment load_assignment; + load_assignment.set_cluster_name("x"); + + auto wrapped_resource = resourceWithTtl(std::chrono::milliseconds(1000), load_assignment); + response->add_resources()->PackFrom(wrapped_resource); + + EXPECT_CALL(*ttl_timer, enabled()); + EXPECT_CALL(*ttl_timer, enableTimer(std::chrono::milliseconds(1000), _)); + EXPECT_CALL(callbacks_, onConfigUpdate(_, "1")) + .WillOnce(Invoke([](const std::vector& resources, const std::string&) { + EXPECT_EQ(1, resources.size()); + })); + expectSendMessage(type_url, {"x"}, "1"); + grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_); + } + + // Increase the TTL. 
+ { + auto response = std::make_unique(); + response->set_type_url(type_url); + response->set_version_info("1"); + envoy::config::endpoint::v3::ClusterLoadAssignment load_assignment; + load_assignment.set_cluster_name("x"); + auto wrapped_resource = resourceWithTtl(std::chrono::milliseconds(10000), load_assignment); + response->add_resources()->PackFrom(wrapped_resource); + + EXPECT_CALL(*ttl_timer, enabled()); + EXPECT_CALL(*ttl_timer, enableTimer(std::chrono::milliseconds(10000), _)); + EXPECT_CALL(callbacks_, onConfigUpdate(_, "1")) + .WillOnce(Invoke([](const std::vector& resources, const std::string&) { + EXPECT_EQ(1, resources.size()); + })); + // No update, just a change in TTL. + expectSendMessage(type_url, {"x"}, "1"); + grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_); + } + + // Refresh the TTL with a heartbeat response. + { + auto response = std::make_unique(); + response->set_type_url(type_url); + response->set_version_info("1"); + auto wrapped_resource = heartbeatResource(std::chrono::milliseconds(10000), "x"); + response->add_resources()->PackFrom(wrapped_resource); + + EXPECT_CALL(*ttl_timer, enabled()); + EXPECT_CALL(callbacks_, onConfigUpdate(_, "1")) + .WillOnce(Invoke([](const std::vector& resources, const std::string&) { + EXPECT_TRUE(resources.empty()); + })); + + // No update, just a change in TTL. + expectSendMessage(type_url, {"x"}, "1"); + grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_); + } + + // Remove the TTL. 
+ { + auto response = std::make_unique(); + response->set_type_url(type_url); + response->set_version_info("1"); + envoy::config::endpoint::v3::ClusterLoadAssignment load_assignment; + load_assignment.set_cluster_name("x"); + response->add_resources()->PackFrom(resourceWithEmptyTtl(load_assignment)); + + EXPECT_CALL(*ttl_timer, disableTimer()); + EXPECT_CALL(callbacks_, onConfigUpdate(_, "1")) + .WillOnce(Invoke([](const std::vector& resources, const std::string&) { + EXPECT_EQ(1, resources.size()); + })); + expectSendMessage(type_url, {"x"}, "1"); + grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_); + } + + // Put the TTL back. + { + auto response = std::make_unique(); + response->set_type_url(type_url); + response->set_version_info("1"); + envoy::config::endpoint::v3::ClusterLoadAssignment load_assignment; + load_assignment.set_cluster_name("x"); + auto wrapped_resource = resourceWithTtl(std::chrono::milliseconds(10000), load_assignment); + response->add_resources()->PackFrom(wrapped_resource); + + EXPECT_CALL(*ttl_timer, enabled()); + EXPECT_CALL(*ttl_timer, enableTimer(std::chrono::milliseconds(10000), _)); + EXPECT_CALL(callbacks_, onConfigUpdate(_, "1")) + .WillOnce(Invoke([](const std::vector& resources, const std::string&) { + EXPECT_EQ(1, resources.size()); + })); + // No update, just a change in TTL. + expectSendMessage(type_url, {"x"}, "1"); + grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_); + } + + time_system_.setSystemTime(std::chrono::seconds(11)); + EXPECT_CALL(callbacks_, onConfigUpdate(_, _, "")) + .WillOnce(Invoke([](auto, const auto& removed, auto) { + EXPECT_EQ(1, removed.size()); + EXPECT_EQ("x", removed.Get(0)); + })); + // Fire the TTL timer. 
+ EXPECT_CALL(*ttl_timer, disableTimer()); + ttl_timer->invokeCallback(); + + expectSendMessage(type_url, {}, "1"); +} + +// Checks that the control plane identifier is logged +TEST_F(GrpcMuxImplTest, LogsControlPlaneIndentifier) { + setup(); + + std::string type_url = "foo"; + auto foo_sub = makeWatch(type_url, {}, callbacks_, resource_decoder_); + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + expectSendMessage(type_url, {}, "", true); + grpc_mux_->start(); + + { + auto response = std::make_unique(); + response->set_type_url(type_url); + response->set_version_info("1"); + response->mutable_control_plane()->set_identifier("control_plane_ID"); + + EXPECT_CALL(callbacks_, onConfigUpdate(_, _)); + expectSendMessage(type_url, {}, "1"); + EXPECT_LOG_CONTAINS("debug", "for foo from control_plane_ID", + grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response))); + } + { + auto response = std::make_unique(); + response->set_type_url(type_url); + response->set_version_info("2"); + response->mutable_control_plane()->set_identifier("different_ID"); + + EXPECT_CALL(callbacks_, onConfigUpdate(_, _)); + expectSendMessage(type_url, {}, "2"); + EXPECT_LOG_CONTAINS("debug", "for foo from different_ID", + grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response))); + } +} + +// Validate behavior when watches has an unknown resource name. 
+TEST_F(GrpcMuxImplTest, WildcardWatch) { + setup(); + + const std::string& type_url = Config::TypeUrl::get().ClusterLoadAssignment; + auto foo_sub = makeWatch(type_url, {}, callbacks_, resource_decoder_); + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + expectSendMessage(type_url, {}, "", true); + grpc_mux_->start(); + + { + auto response = std::make_unique(); + response->set_type_url(type_url); + response->set_version_info("1"); + envoy::config::endpoint::v3::ClusterLoadAssignment load_assignment; + load_assignment.set_cluster_name("x"); + response->add_resources()->PackFrom(load_assignment); + EXPECT_CALL(callbacks_, onConfigUpdate(_, "1")) + .WillOnce(Invoke([&load_assignment](const std::vector& resources, + const std::string&) { + EXPECT_EQ(1, resources.size()); + const auto& expected_assignment = + dynamic_cast( + resources[0].get().resource()); + EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment)); + })); + expectSendMessage(type_url, {}, "1"); + grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_); + } +} + +// Validate behavior when watches specify resources (potentially overlapping). +TEST_F(GrpcMuxImplTest, WatchDemux) { + setup(); + // We will not require InSequence here: an update that causes multiple onConfigUpdates + // causes them in an indeterminate order, based on the whims of the hash map. + const std::string& type_url = Config::TypeUrl::get().ClusterLoadAssignment; + + NiceMock foo_callbacks; + auto foo_sub = makeWatch(type_url, {"x", "y"}, foo_callbacks, resource_decoder_); + NiceMock bar_callbacks; + auto bar_sub = makeWatch(type_url, {"y", "z"}, bar_callbacks, resource_decoder_); + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + // Should dedupe the "x" resource. + expectSendMessage(type_url, {"y", "z", "x"}, "", true); + grpc_mux_->start(); + + // Send just x; only foo_callbacks should receive an onConfigUpdate(). 
+ { + auto response = std::make_unique(); + response->set_type_url(type_url); + response->set_version_info("1"); + envoy::config::endpoint::v3::ClusterLoadAssignment load_assignment; + load_assignment.set_cluster_name("x"); + response->add_resources()->PackFrom(load_assignment); + EXPECT_CALL(bar_callbacks, onConfigUpdate(_, "1")).Times(0); + EXPECT_CALL(foo_callbacks, onConfigUpdate(_, "1")) + .WillOnce(Invoke([&load_assignment](const std::vector& resources, + const std::string&) { + EXPECT_EQ(1, resources.size()); + const auto& expected_assignment = + dynamic_cast( + resources[0].get().resource()); + EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment)); + })); + expectSendMessage(type_url, {"y", "z", "x"}, "1"); + grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_); + } + + // Send x y and z; foo_ and bar_callbacks should both receive onConfigUpdate()s, carrying {x,y} + // and {y,z} respectively. + { + auto response = std::make_unique(); + response->set_type_url(type_url); + response->set_version_info("2"); + envoy::config::endpoint::v3::ClusterLoadAssignment load_assignment_x; + load_assignment_x.set_cluster_name("x"); + response->add_resources()->PackFrom(load_assignment_x); + envoy::config::endpoint::v3::ClusterLoadAssignment load_assignment_y; + load_assignment_y.set_cluster_name("y"); + response->add_resources()->PackFrom(load_assignment_y); + envoy::config::endpoint::v3::ClusterLoadAssignment load_assignment_z; + load_assignment_z.set_cluster_name("z"); + response->add_resources()->PackFrom(load_assignment_z); + EXPECT_CALL(bar_callbacks, onConfigUpdate(_, "2")) + .WillOnce(Invoke([&load_assignment_y, &load_assignment_z]( + const std::vector& resources, const std::string&) { + EXPECT_EQ(2, resources.size()); + const auto& expected_assignment = + dynamic_cast( + resources[0].get().resource()); + EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment_y)); + const auto& expected_assignment_1 = + 
dynamic_cast( + resources[1].get().resource()); + EXPECT_TRUE(TestUtility::protoEqual(expected_assignment_1, load_assignment_z)); + })); + EXPECT_CALL(foo_callbacks, onConfigUpdate(_, "2")) + .WillOnce(Invoke([&load_assignment_x, &load_assignment_y]( + const std::vector& resources, const std::string&) { + EXPECT_EQ(2, resources.size()); + const auto& expected_assignment = + dynamic_cast( + resources[0].get().resource()); + EXPECT_TRUE(TestUtility::protoEqual(expected_assignment, load_assignment_x)); + const auto& expected_assignment_1 = + dynamic_cast( + resources[1].get().resource()); + EXPECT_TRUE(TestUtility::protoEqual(expected_assignment_1, load_assignment_y)); + })); + expectSendMessage(type_url, {"y", "z", "x"}, "2"); + grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_); + } + + expectSendMessage(type_url, {"x", "y"}, "2"); + expectSendMessage(type_url, {}, "2"); +} + +// Validate behavior when we have multiple watchers that send empty updates. +TEST_F(GrpcMuxImplTest, MultipleWatcherWithEmptyUpdates) { + setup(); + InSequence s; + const std::string& type_url = Config::TypeUrl::get().ClusterLoadAssignment; + NiceMock foo_callbacks; + auto foo_sub = makeWatch(type_url, {"x", "y"}, foo_callbacks, resource_decoder_); + + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + expectSendMessage(type_url, {"x", "y"}, "", true); + grpc_mux_->start(); + + auto response = std::make_unique(); + response->set_type_url(type_url); + response->set_version_info("1"); + + EXPECT_CALL(foo_callbacks, onConfigUpdate(_, "1")).Times(0); + expectSendMessage(type_url, {"x", "y"}, "1"); + grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_); + + expectSendMessage(type_url, {}, "1"); +} + +// Validate behavior when we have Single Watcher that sends Empty updates. 
+TEST_F(GrpcMuxImplTest, SingleWatcherWithEmptyUpdates) { + setup(); + const std::string& type_url = Config::TypeUrl::get().Cluster; + NiceMock foo_callbacks; + auto foo_sub = makeWatch(type_url, {}, foo_callbacks, resource_decoder_); + + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + expectSendMessage(type_url, {}, "", true); + grpc_mux_->start(); + + auto response = std::make_unique(); + response->set_type_url(type_url); + response->set_version_info("1"); + // Validate that onConfigUpdate is called with empty resources. + EXPECT_CALL(foo_callbacks, onConfigUpdate(_, "1")) + .WillOnce(Invoke([](const std::vector& resources, const std::string&) { + EXPECT_TRUE(resources.empty()); + })); + expectSendMessage(type_url, {}, "1"); + grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_); +} + +// Exactly one test requires a mock time system to provoke behavior that cannot +// easily be achieved with a SimulatedTimeSystem. +class GrpcMuxImplTestWithMockTimeSystem : public GrpcMuxImplTestBase { +public: + Event::DelegatingTestTimeSystem mock_time_system_; +}; + +// Verifies that rate limiting is not enforced with defaults. +TEST_F(GrpcMuxImplTestWithMockTimeSystem, TooManyRequestsWithDefaultSettings) { + + auto ttl_timer = new Event::MockTimer(&dispatcher_); + // Retry timer, + new Event::MockTimer(&dispatcher_); + + // Validate that rate limiter is not created. 
+ EXPECT_CALL(*mock_time_system_, monotonicTime()).Times(0); + + setup(); + + EXPECT_CALL(async_stream_, sendMessageRaw_(_, false)).Times(AtLeast(99)); + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + + const auto onReceiveMessage = [&](uint64_t burst) { + for (uint64_t i = 0; i < burst; i++) { + auto response = std::make_unique(); + response->set_version_info("type_url_baz"); + response->set_nonce("type_url_bar"); + response->set_type_url("type_url_foo"); + EXPECT_CALL(*ttl_timer, disableTimer()); + grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_); + } + }; + + auto foo_sub = makeWatch("type_url_foo", {"x"}); + expectSendMessage("type_url_foo", {"x"}, "", true); + grpc_mux_->start(); + + // Exhausts the limit. + onReceiveMessage(99); + + // API calls go over the limit but we do not see the stat incremented. + onReceiveMessage(1); + EXPECT_EQ(0, stats_.counter("control_plane.rate_limit_enforced").value()); +} + +// Verifies that default rate limiting is enforced with empty RateLimitSettings. +TEST_F(GrpcMuxImplTest, TooManyRequestsWithEmptyRateLimitSettings) { + // Validate that request drain timer is created. + + auto ttl_timer = new Event::MockTimer(&dispatcher_); + Event::MockTimer* drain_request_timer = new Event::MockTimer(&dispatcher_); + Event::MockTimer* retry_timer = new Event::MockTimer(&dispatcher_); + + RateLimitSettings custom_rate_limit_settings; + custom_rate_limit_settings.enabled_ = true; + setup(custom_rate_limit_settings); + + // Attempt to send 99 messages. One of them is rate limited (and we never drain). 
+ EXPECT_CALL(async_stream_, sendMessageRaw_(_, false)).Times(99); + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + + const auto onReceiveMessage = [&](uint64_t burst) { + for (uint64_t i = 0; i < burst; i++) { + auto response = std::make_unique(); + response->set_version_info("type_url_baz"); + response->set_nonce("type_url_bar"); + response->set_type_url("type_url_foo"); + EXPECT_CALL(*ttl_timer, disableTimer()); + grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_); + } + }; + + auto foo_sub = makeWatch("type_url_foo", {"x"}); + expectSendMessage("type_url_foo", {"x"}, "", true); + grpc_mux_->start(); + + // Validate that drain_request_timer is enabled when there are no tokens. + EXPECT_CALL(*drain_request_timer, enableTimer(std::chrono::milliseconds(100), _)); + // The drain timer enable is checked twice, once when we limit, again when the watch is destroyed. + EXPECT_CALL(*drain_request_timer, enabled()).Times(11); + onReceiveMessage(110); + EXPECT_EQ(11, stats_.counter("control_plane.rate_limit_enforced").value()); + EXPECT_EQ(11, control_plane_pending_requests_.value()); + + // Validate that when we reset a stream with pending requests, it reverts back to the initial + // query (i.e. the queue is discarded). 
+ EXPECT_CALL(callbacks_, + onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, _)); + EXPECT_CALL(random_, random()); + EXPECT_CALL(*retry_timer, enableTimer(_, _)); + grpc_mux_->grpcStreamForTest().onRemoteClose(Grpc::Status::WellKnownGrpcStatus::Canceled, ""); + EXPECT_EQ(11, control_plane_pending_requests_.value()); + EXPECT_EQ(0, control_plane_connected_state_.value()); + EXPECT_CALL(async_stream_, sendMessageRaw_(_, false)); + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + time_system_.setMonotonicTime(std::chrono::seconds(30)); + retry_timer->invokeCallback(); + EXPECT_EQ(0, control_plane_pending_requests_.value()); + // One more message on the way out when the watch is destroyed. + EXPECT_CALL(async_stream_, sendMessageRaw_(_, false)); +} + +// Verifies that rate limiting is enforced with custom RateLimitSettings. +TEST_F(GrpcMuxImplTest, TooManyRequestsWithCustomRateLimitSettings) { + // Validate that request drain timer is created. + + // TTL timer. + auto ttl_timer = new Event::MockTimer(&dispatcher_); + Event::MockTimer* drain_request_timer = new Event::MockTimer(&dispatcher_); + // Retry timer. 
+ new Event::MockTimer(&dispatcher_); + + RateLimitSettings custom_rate_limit_settings; + custom_rate_limit_settings.enabled_ = true; + custom_rate_limit_settings.max_tokens_ = 250; + custom_rate_limit_settings.fill_rate_ = 2; + setup(custom_rate_limit_settings); + + EXPECT_CALL(async_stream_, sendMessageRaw_(_, false)).Times(AtLeast(260)); + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + + const auto onReceiveMessage = [&](uint64_t burst) { + for (uint64_t i = 0; i < burst; i++) { + auto response = std::make_unique(); + response->set_version_info("type_url_baz"); + response->set_nonce("type_url_bar"); + response->set_type_url("type_url_foo"); + EXPECT_CALL(*ttl_timer, disableTimer()); + grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_); + } + }; + + auto foo_sub = makeWatch("type_url_foo", {"x"}); + expectSendMessage("type_url_foo", {"x"}, "", true); + grpc_mux_->start(); + + // Validate that rate limit is not enforced for 100 requests. + onReceiveMessage(100); + EXPECT_EQ(0, stats_.counter("control_plane.rate_limit_enforced").value()); + + // Validate that drain_request_timer is enabled when there are no tokens. + EXPECT_CALL(*drain_request_timer, enableTimer(std::chrono::milliseconds(500), _)); + EXPECT_CALL(*drain_request_timer, enabled()).Times(11); + onReceiveMessage(160); + EXPECT_EQ(11, stats_.counter("control_plane.rate_limit_enforced").value()); + EXPECT_EQ(11, control_plane_pending_requests_.value()); + + // Validate that drain requests call when there are multiple requests in queue. + time_system_.setMonotonicTime(std::chrono::seconds(10)); + drain_request_timer->invokeCallback(); + + // Check that the pending_requests stat is updated with the queue drain. + EXPECT_EQ(0, control_plane_pending_requests_.value()); +} + +// Verifies that a message with no resources is accepted. 
+TEST_F(GrpcMuxImplTest, UnwatchedTypeAcceptsEmptyResources) { + setup(); + + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + + const std::string& type_url = Config::TypeUrl::get().ClusterLoadAssignment; + + grpc_mux_->start(); + { + // subscribe and unsubscribe to simulate a cluster added and removed + expectSendMessage(type_url, {"y"}, "", true); + auto temp_sub = makeWatch(type_url, {"y"}); + expectSendMessage(type_url, {}, ""); + } + + // simulate the server sending empty CLA message to notify envoy that the CLA was removed. + auto response = std::make_unique(); + response->set_nonce("bar"); + response->set_version_info("1"); + response->set_type_url(type_url); + + // Although the update will change nothing for us, we will "accept" it, and so according + // to the spec we should ACK it. + expectSendMessage(type_url, {}, "1", false, "bar"); + grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_); + + // When we become interested in "x", we should send a request indicating that interest. + expectSendMessage(type_url, {"x"}, "1", false, "bar"); + auto sub = makeWatch(type_url, {"x"}); + + // Watch destroyed -> interest gone -> unsubscribe request. + expectSendMessage(type_url, {}, "1", false, "bar"); +} + +// Verifies that a message with some resources is accepted even when there are no watches. +// Rationale: SotW gRPC xDS has always been willing to accept updates that include +// uninteresting resources. It should not matter whether those uninteresting resources +// are accompanied by interesting ones. +// Note: this was previously "rejects", not "accepts". See +// https://github.com/envoyproxy/envoy/pull/8350#discussion_r328218220 for discussion. 
+TEST_F(GrpcMuxImplTest, UnwatchedTypeAcceptsResources) { + setup(); + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + const std::string& type_url = Config::TypeUrl::get().ClusterLoadAssignment; + grpc_mux_->start(); + + // subscribe and unsubscribe so that the type is known to envoy + { + expectSendMessage(type_url, {"y"}, "", true); + expectSendMessage(type_url, {}, ""); + auto delete_immediately = makeWatch(type_url, {"y"}); + } + auto response = std::make_unique(); + response->set_type_url(type_url); + envoy::config::endpoint::v3::ClusterLoadAssignment load_assignment; + load_assignment.set_cluster_name("x"); + response->add_resources()->PackFrom(load_assignment); + response->set_version_info("1"); + + expectSendMessage(type_url, {}, "1"); + grpc_mux_->onDiscoveryResponse(std::move(response), control_plane_stats_); +} + +TEST_F(GrpcMuxImplTest, BadLocalInfoEmptyClusterName) { + EXPECT_CALL(local_info_, clusterName()).WillOnce(ReturnRef(EMPTY_STRING)); + EXPECT_THROW_WITH_MESSAGE( + XdsMux::GrpcMuxSotw( + std::unique_ptr(async_client_), dispatcher_, + *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( + "envoy.service.discovery.v2.AggregatedDiscoveryService.StreamAggregatedResources"), + envoy::config::core::v3::ApiVersion::AUTO, random_, stats_, rate_limit_settings_, + local_info_, true), + EnvoyException, + "ads: node 'id' and 'cluster' are required. 
Set it either in 'node' config or via " + "--service-node and --service-cluster options."); +} + +TEST_F(GrpcMuxImplTest, BadLocalInfoEmptyNodeName) { + EXPECT_CALL(local_info_, nodeName()).WillOnce(ReturnRef(EMPTY_STRING)); + EXPECT_THROW_WITH_MESSAGE( + XdsMux::GrpcMuxSotw( + std::unique_ptr(async_client_), dispatcher_, + *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( + "envoy.service.discovery.v2.AggregatedDiscoveryService.StreamAggregatedResources"), + envoy::config::core::v3::ApiVersion::AUTO, random_, stats_, rate_limit_settings_, + local_info_, true), + EnvoyException, + "ads: node 'id' and 'cluster' are required. Set it either in 'node' config or via " + "--service-node and --service-cluster options."); +} + +// Validate behavior when dynamic context parameters are updated. +TEST_F(GrpcMuxImplTest, DynamicContextParameters) { + setup(); + InSequence s; + auto foo = grpc_mux_->addWatch("foo", {"x", "y"}, callbacks_, resource_decoder_, {}); + auto bar = grpc_mux_->addWatch("bar", {}, callbacks_, resource_decoder_, {}); + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + expectSendMessage("foo", {"x", "y"}, "", true); + expectSendMessage("bar", {}, ""); + grpc_mux_->start(); + // Unknown type, shouldn't do anything. + local_info_.context_provider_.update_cb_handler_.runCallbacks("baz"); + // Update to foo type should resend Node. + expectSendMessage("foo", {"x", "y"}, "", true); + local_info_.context_provider_.update_cb_handler_.runCallbacks("foo"); + // Update to bar type should resend Node. + expectSendMessage("bar", {}, "", true); + local_info_.context_provider_.update_cb_handler_.runCallbacks("bar"); + // only destruction of foo watch is going to result in an unsubscribe message. + // bar watch is empty and its destruction doesn't change it resource list. 
+ expectSendMessage("foo", {}, "", false); +} + +} // namespace +} // namespace XdsMux +} // namespace Config +} // namespace Envoy diff --git a/test/common/conn_pool/conn_pool_base_test.cc b/test/common/conn_pool/conn_pool_base_test.cc index f1f53f69f9fd8..869dafa68a678 100644 --- a/test/common/conn_pool/conn_pool_base_test.cc +++ b/test/common/conn_pool/conn_pool_base_test.cc @@ -108,7 +108,7 @@ TEST_F(ConnPoolImplBaseTest, BasicPreconnect) { // On new stream, create 2 connections. CHECK_STATE(0 /*active*/, 0 /*pending*/, 0 /*connecting capacity*/); EXPECT_CALL(pool_, instantiateActiveClient).Times(2); - auto cancelable = pool_.newStream(context_); + auto cancelable = pool_.newStreamImpl(context_); CHECK_STATE(0 /*active*/, 1 /*pending*/, 2 /*connecting capacity*/); cancelable->cancel(ConnectionPool::CancelPolicy::CloseExcess); @@ -124,13 +124,13 @@ TEST_F(ConnPoolImplBaseTest, PreconnectOnDisconnect) { // On new stream, create 2 connections. EXPECT_CALL(pool_, instantiateActiveClient).Times(2); - pool_.newStream(context_); + pool_.newStreamImpl(context_); CHECK_STATE(0 /*active*/, 1 /*pending*/, 2 /*connecting capacity*/); // If a connection fails, existing connections are purged. If a retry causes // a new stream, make sure we create the correct number of connections. EXPECT_CALL(pool_, onPoolFailure).WillOnce(InvokeWithoutArgs([&]() -> void { - pool_.newStream(context_); + pool_.newStreamImpl(context_); })); EXPECT_CALL(pool_, instantiateActiveClient); clients_[0]->close(); @@ -149,7 +149,7 @@ TEST_F(ConnPoolImplBaseTest, NoPreconnectIfUnhealthy) { // On new stream, create 1 connection. 
EXPECT_CALL(pool_, instantiateActiveClient); - auto cancelable = pool_.newStream(context_); + auto cancelable = pool_.newStreamImpl(context_); CHECK_STATE(0 /*active*/, 1 /*pending*/, 1 /*connecting capacity*/); cancelable->cancel(ConnectionPool::CancelPolicy::CloseExcess); @@ -166,7 +166,7 @@ TEST_F(ConnPoolImplBaseTest, NoPreconnectIfDegraded) { // On new stream, create 1 connection. EXPECT_CALL(pool_, instantiateActiveClient); - auto cancelable = pool_.newStream(context_); + auto cancelable = pool_.newStreamImpl(context_); cancelable->cancel(ConnectionPool::CancelPolicy::CloseExcess); pool_.destructAllConnections(); @@ -178,17 +178,17 @@ TEST_F(ConnPoolImplBaseTest, ExplicitPreconnect) { EXPECT_CALL(pool_, instantiateActiveClient).Times(AnyNumber()); // With global preconnect off, we won't preconnect. - EXPECT_FALSE(pool_.maybePreconnect(0)); + EXPECT_FALSE(pool_.maybePreconnectImpl(0)); CHECK_STATE(0 /*active*/, 0 /*pending*/, 0 /*connecting capacity*/); // With preconnect ratio of 1.1, we'll preconnect two connections. // Currently, no number of subsequent calls to preconnect will increase that. - EXPECT_TRUE(pool_.maybePreconnect(1.1)); - EXPECT_TRUE(pool_.maybePreconnect(1.1)); - EXPECT_FALSE(pool_.maybePreconnect(1.1)); + EXPECT_TRUE(pool_.maybePreconnectImpl(1.1)); + EXPECT_TRUE(pool_.maybePreconnectImpl(1.1)); + EXPECT_FALSE(pool_.maybePreconnectImpl(1.1)); CHECK_STATE(0 /*active*/, 0 /*pending*/, 2 /*connecting capacity*/); // With a higher preconnect ratio, more connections may be preconnected. - EXPECT_TRUE(pool_.maybePreconnect(3)); + EXPECT_TRUE(pool_.maybePreconnectImpl(3)); pool_.destructAllConnections(); } @@ -199,7 +199,7 @@ TEST_F(ConnPoolImplBaseTest, ExplicitPreconnectNotHealthy) { // Preconnect won't occur if the host is not healthy. 
host_->healthFlagSet(Upstream::Host::HealthFlag::DEGRADED_EDS_HEALTH); - EXPECT_FALSE(pool_.maybePreconnect(1)); + EXPECT_FALSE(pool_.maybePreconnectImpl(1)); } // Remote close simulates the peer closing the connection. @@ -208,7 +208,7 @@ TEST_F(ConnPoolImplBaseTest, PoolIdleCallbackTriggeredRemoteClose) { // Create a new stream using the pool EXPECT_CALL(pool_, instantiateActiveClient); - pool_.newStream(context_); + pool_.newStreamImpl(context_); ASSERT_EQ(1, clients_.size()); // Emulate the new upstream connection establishment @@ -236,7 +236,7 @@ TEST_F(ConnPoolImplBaseTest, PoolIdleCallbackTriggeredLocalClose) { // Create a new stream using the pool EXPECT_CALL(pool_, instantiateActiveClient); - pool_.newStream(context_); + pool_.newStreamImpl(context_); ASSERT_EQ(1, clients_.size()); // Emulate the new upstream connection establishment diff --git a/test/common/filter/http/BUILD b/test/common/filter/BUILD similarity index 83% rename from test/common/filter/http/BUILD rename to test/common/filter/BUILD index c6ce0344543c4..4d9b44b7f211f 100644 --- a/test/common/filter/http/BUILD +++ b/test/common/filter/BUILD @@ -9,11 +9,11 @@ licenses(["notice"]) # Apache 2 envoy_package() envoy_cc_test( - name = "filter_config_discovery_impl_test", - srcs = ["filter_config_discovery_impl_test.cc"], + name = "config_discovery_impl_test", + srcs = ["config_discovery_impl_test.cc"], deps = [ "//source/common/config:utility_lib", - "//source/common/filter/http:filter_config_discovery_lib", + "//source/common/filter:config_discovery_lib", "//source/common/json:json_loader_lib", "//source/extensions/filters/http/health_check:config", "//source/extensions/filters/http/router:config", diff --git a/test/common/filter/http/filter_config_discovery_impl_test.cc b/test/common/filter/config_discovery_impl_test.cc similarity index 98% rename from test/common/filter/http/filter_config_discovery_impl_test.cc rename to test/common/filter/config_discovery_impl_test.cc index 
3fe8e87f0da84..ede45f2577257 100644 --- a/test/common/filter/http/filter_config_discovery_impl_test.cc +++ b/test/common/filter/config_discovery_impl_test.cc @@ -9,7 +9,7 @@ #include "envoy/stats/scope.h" #include "source/common/config/utility.h" -#include "source/common/filter/http/filter_config_discovery_impl.h" +#include "source/common/filter/config_discovery_impl.h" #include "source/common/json/json_loader.h" #include "test/mocks/init/mocks.h" @@ -34,7 +34,6 @@ using testing::ReturnRef; namespace Envoy { namespace Filter { -namespace Http { namespace { class FilterConfigDiscoveryTestBase : public testing::Test { @@ -73,7 +72,7 @@ class FilterConfigDiscoveryTestBase : public testing::Test { class FilterConfigDiscoveryImplTest : public FilterConfigDiscoveryTestBase { public: FilterConfigDiscoveryImplTest() { - filter_config_provider_manager_ = std::make_unique(); + filter_config_provider_manager_ = std::make_unique(); } ~FilterConfigDiscoveryImplTest() override { factory_context_.thread_local_.shutdownThread(); } @@ -406,6 +405,5 @@ TEST_F(FilterConfigDiscoveryImplTest, TerminalFilterInvalid) { } } // namespace -} // namespace Http } // namespace Filter } // namespace Envoy diff --git a/test/common/formatter/substitution_formatter_speed_test.cc b/test/common/formatter/substitution_formatter_speed_test.cc index 320bd7d8878fd..7c919bfc4c100 100644 --- a/test/common/formatter/substitution_formatter_speed_test.cc +++ b/test/common/formatter/substitution_formatter_speed_test.cc @@ -48,7 +48,7 @@ std::unique_ptr makeStructFormatter(bool type std::unique_ptr makeStreamInfo() { auto stream_info = std::make_unique(); - stream_info->downstream_address_provider_->setRemoteAddress( + stream_info->downstream_connection_info_provider_->setRemoteAddress( std::make_shared("203.0.113.1")); return stream_info; } diff --git a/test/common/formatter/substitution_formatter_test.cc b/test/common/formatter/substitution_formatter_test.cc index 00d0ce6a3a4d5..6ca626f78d364 100644 --- 
a/test/common/formatter/substitution_formatter_test.cc +++ b/test/common/formatter/substitution_formatter_test.cc @@ -597,7 +597,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { // Validate for IPv4 address auto address = Network::Address::InstanceConstSharedPtr{ new Network::Address::Ipv4Instance("127.1.2.3", 8443)}; - stream_info.downstream_address_provider_->setLocalAddress(address); + stream_info.downstream_connection_info_provider_->setLocalAddress(address); EXPECT_EQ("8443", upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, @@ -607,7 +607,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { // Validate for IPv6 address address = Network::Address::InstanceConstSharedPtr{new Network::Address::Ipv6Instance("::1", 9443)}; - stream_info.downstream_address_provider_->setLocalAddress(address); + stream_info.downstream_connection_info_provider_->setLocalAddress(address); EXPECT_EQ("9443", upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, @@ -616,7 +616,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { // Validate for Pipe address = Network::Address::InstanceConstSharedPtr{new Network::Address::PipeInstance("/foo")}; - stream_info.downstream_address_provider_->setLocalAddress(address); + stream_info.downstream_connection_info_provider_->setLocalAddress(address); EXPECT_EQ("", upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, @@ -663,7 +663,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { { StreamInfoFormatter upstream_format("CONNECTION_ID"); uint64_t id = 123; - 
stream_info.downstream_address_provider_->setConnectionID(id); + stream_info.downstream_connection_info_provider_->setConnectionID(id); EXPECT_EQ("123", upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, @@ -674,7 +674,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { { StreamInfoFormatter upstream_format("REQUESTED_SERVER_NAME"); std::string requested_server_name = "stub_server"; - stream_info.downstream_address_provider_->setRequestedServerName(requested_server_name); + stream_info.downstream_connection_info_provider_->setRequestedServerName(requested_server_name); EXPECT_EQ("stub_server", upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, @@ -685,7 +685,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { { StreamInfoFormatter upstream_format("REQUESTED_SERVER_NAME"); std::string requested_server_name; - stream_info.downstream_address_provider_->setRequestedServerName(requested_server_name); + stream_info.downstream_connection_info_provider_->setRequestedServerName(requested_server_name); EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, @@ -698,7 +698,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { auto connection_info = std::make_shared(); const std::vector sans{"san"}; EXPECT_CALL(*connection_info, uriSanPeerCertificate()).WillRepeatedly(Return(sans)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); EXPECT_EQ("san", upstream_format.format(request_headers, 
response_headers, response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, @@ -711,7 +711,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { auto connection_info = std::make_shared(); const std::vector sans{"san1", "san2"}; EXPECT_CALL(*connection_info, uriSanPeerCertificate()).WillRepeatedly(Return(sans)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); EXPECT_EQ("san1,san2", upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); } @@ -720,7 +720,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { auto connection_info = std::make_shared(); EXPECT_CALL(*connection_info, uriSanPeerCertificate()) .WillRepeatedly(Return(std::vector())); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, @@ -728,7 +728,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { ProtoEq(ValueUtil::nullValue())); } { - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + stream_info.downstream_connection_info_provider_->setSslConnection(nullptr); StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_URI_SAN"); EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); @@ -741,7 +741,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { auto connection_info = std::make_shared(); const std::vector sans{"san"}; EXPECT_CALL(*connection_info, 
uriSanLocalCertificate()).WillRepeatedly(Return(sans)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); EXPECT_EQ("san", upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, @@ -753,7 +753,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { auto connection_info = std::make_shared(); const std::vector sans{"san1", "san2"}; EXPECT_CALL(*connection_info, uriSanLocalCertificate()).WillRepeatedly(Return(sans)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); EXPECT_EQ("san1,san2", upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); } @@ -762,7 +762,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { auto connection_info = std::make_shared(); EXPECT_CALL(*connection_info, uriSanLocalCertificate()) .WillRepeatedly(Return(std::vector())); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, @@ -770,7 +770,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { ProtoEq(ValueUtil::nullValue())); } { - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + stream_info.downstream_connection_info_provider_->setSslConnection(nullptr); StreamInfoFormatter upstream_format("DOWNSTREAM_LOCAL_URI_SAN"); EXPECT_EQ(absl::nullopt, 
upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); @@ -784,7 +784,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { const std::string subject_local = "subject"; EXPECT_CALL(*connection_info, subjectLocalCertificate()) .WillRepeatedly(ReturnRef(subject_local)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); EXPECT_EQ("subject", upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, @@ -796,7 +796,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { auto connection_info = std::make_shared(); EXPECT_CALL(*connection_info, subjectLocalCertificate()) .WillRepeatedly(ReturnRef(EMPTY_STRING)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, @@ -804,7 +804,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { ProtoEq(ValueUtil::nullValue())); } { - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + stream_info.downstream_connection_info_provider_->setSslConnection(nullptr); StreamInfoFormatter upstream_format("DOWNSTREAM_LOCAL_SUBJECT"); EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); @@ -817,7 +817,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { auto connection_info = std::make_shared(); const std::string subject_peer = "subject"; EXPECT_CALL(*connection_info, 
subjectPeerCertificate()).WillRepeatedly(ReturnRef(subject_peer)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); EXPECT_EQ("subject", upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, @@ -828,7 +828,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_SUBJECT"); auto connection_info = std::make_shared(); EXPECT_CALL(*connection_info, subjectPeerCertificate()).WillRepeatedly(ReturnRef(EMPTY_STRING)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, @@ -836,7 +836,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { ProtoEq(ValueUtil::nullValue())); } { - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + stream_info.downstream_connection_info_provider_->setSslConnection(nullptr); StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_SUBJECT"); EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); @@ -849,7 +849,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { auto connection_info = std::make_shared(); const std::string session_id = "deadbeef"; EXPECT_CALL(*connection_info, sessionId()).WillRepeatedly(ReturnRef(session_id)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + 
stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); EXPECT_EQ("deadbeef", upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, @@ -860,7 +860,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { StreamInfoFormatter upstream_format("DOWNSTREAM_TLS_SESSION_ID"); auto connection_info = std::make_shared(); EXPECT_CALL(*connection_info, sessionId()).WillRepeatedly(ReturnRef(EMPTY_STRING)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, @@ -868,7 +868,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { ProtoEq(ValueUtil::nullValue())); } { - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + stream_info.downstream_connection_info_provider_->setSslConnection(nullptr); StreamInfoFormatter upstream_format("DOWNSTREAM_TLS_SESSION_ID"); EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); @@ -881,7 +881,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { auto connection_info = std::make_shared(); EXPECT_CALL(*connection_info, ciphersuiteString()) .WillRepeatedly(Return("TLS_DHE_RSA_WITH_AES_256_GCM_SHA384")); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); EXPECT_EQ("TLS_DHE_RSA_WITH_AES_256_GCM_SHA384", upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); @@ 
-890,7 +890,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { StreamInfoFormatter upstream_format("DOWNSTREAM_TLS_CIPHER"); auto connection_info = std::make_shared(); EXPECT_CALL(*connection_info, ciphersuiteString()).WillRepeatedly(Return("")); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, @@ -898,7 +898,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { ProtoEq(ValueUtil::nullValue())); } { - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + stream_info.downstream_connection_info_provider_->setSslConnection(nullptr); StreamInfoFormatter upstream_format("DOWNSTREAM_TLS_CIPHER"); EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); @@ -911,7 +911,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { auto connection_info = std::make_shared(); std::string tlsVersion = "TLSv1.2"; EXPECT_CALL(*connection_info, tlsVersion()).WillRepeatedly(ReturnRef(tlsVersion)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); EXPECT_EQ("TLSv1.2", upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, @@ -922,7 +922,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { StreamInfoFormatter upstream_format("DOWNSTREAM_TLS_VERSION"); auto connection_info = std::make_shared(); EXPECT_CALL(*connection_info, 
tlsVersion()).WillRepeatedly(ReturnRef(EMPTY_STRING)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, @@ -930,7 +930,8 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { ProtoEq(ValueUtil::nullValue())); } { - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + stream_info.downstream_connection_info_provider_->setSslConnection(nullptr); + stream_info.downstream_connection_info_provider_->setSslConnection(nullptr); StreamInfoFormatter upstream_format("DOWNSTREAM_TLS_VERSION"); EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); @@ -944,7 +945,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { std::string expected_sha = "685a2db593d5f86d346cb1a297009c3b467ad77f1944aa799039a2fb3d531f3f"; EXPECT_CALL(*connection_info, sha256PeerCertificateDigest()) .WillRepeatedly(ReturnRef(expected_sha)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); EXPECT_EQ(expected_sha, upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, @@ -957,7 +958,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { std::string expected_sha; EXPECT_CALL(*connection_info, sha256PeerCertificateDigest()) .WillRepeatedly(ReturnRef(expected_sha)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + 
stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, @@ -965,7 +966,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { ProtoEq(ValueUtil::nullValue())); } { - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + stream_info.downstream_connection_info_provider_->setSslConnection(nullptr); StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_FINGERPRINT_256"); EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); @@ -979,7 +980,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { std::string expected_sha = "685a2db593d5f86d346cb1a297009c3b467ad77f1944aa799039a2fb3d531f3f"; EXPECT_CALL(*connection_info, sha1PeerCertificateDigest()) .WillRepeatedly(ReturnRef(expected_sha)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); EXPECT_EQ(expected_sha, upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, @@ -992,7 +993,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { std::string expected_sha; EXPECT_CALL(*connection_info, sha1PeerCertificateDigest()) .WillRepeatedly(ReturnRef(expected_sha)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); 
EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, @@ -1000,7 +1001,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { ProtoEq(ValueUtil::nullValue())); } { - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + stream_info.downstream_connection_info_provider_->setSslConnection(nullptr); StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_FINGERPRINT_1"); EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); @@ -1014,7 +1015,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { const std::string serial_number = "b8b5ecc898f2124a"; EXPECT_CALL(*connection_info, serialNumberPeerCertificate()) .WillRepeatedly(ReturnRef(serial_number)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); EXPECT_EQ("b8b5ecc898f2124a", upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, @@ -1026,7 +1027,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { auto connection_info = std::make_shared(); EXPECT_CALL(*connection_info, serialNumberPeerCertificate()) .WillRepeatedly(ReturnRef(EMPTY_STRING)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, @@ -1034,7 +1035,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { ProtoEq(ValueUtil::nullValue())); } { - EXPECT_CALL(stream_info, 
downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + stream_info.downstream_connection_info_provider_->setSslConnection(nullptr); StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_SERIAL"); EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); @@ -1048,7 +1049,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { const std::string issuer_peer = "CN=Test CA,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US"; EXPECT_CALL(*connection_info, issuerPeerCertificate()).WillRepeatedly(ReturnRef(issuer_peer)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); EXPECT_EQ("CN=Test CA,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US", upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); @@ -1057,7 +1058,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_ISSUER"); auto connection_info = std::make_shared(); EXPECT_CALL(*connection_info, issuerPeerCertificate()).WillRepeatedly(ReturnRef(EMPTY_STRING)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, @@ -1065,7 +1066,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { ProtoEq(ValueUtil::nullValue())); } { - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + stream_info.downstream_connection_info_provider_->setSslConnection(nullptr); StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_ISSUER"); 
EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); @@ -1079,7 +1080,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { const std::string subject_peer = "CN=Test Server,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US"; EXPECT_CALL(*connection_info, subjectPeerCertificate()).WillRepeatedly(ReturnRef(subject_peer)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); EXPECT_EQ("CN=Test Server,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US", upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); @@ -1088,7 +1089,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_SUBJECT"); auto connection_info = std::make_shared(); EXPECT_CALL(*connection_info, subjectPeerCertificate()).WillRepeatedly(ReturnRef(EMPTY_STRING)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, @@ -1096,7 +1097,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { ProtoEq(ValueUtil::nullValue())); } { - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + stream_info.downstream_connection_info_provider_->setSslConnection(nullptr); StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_SUBJECT"); EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); @@ -1110,7 +1111,7 @@ TEST(SubstitutionFormatterTest, 
streamInfoFormatter) { std::string expected_cert = ""; EXPECT_CALL(*connection_info, urlEncodedPemEncodedPeerCertificate()) .WillRepeatedly(ReturnRef(expected_cert)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); EXPECT_EQ(expected_cert, upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, @@ -1123,7 +1124,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { std::string expected_cert = ""; EXPECT_CALL(*connection_info, urlEncodedPemEncodedPeerCertificate()) .WillRepeatedly(ReturnRef(expected_cert)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); EXPECT_THAT(upstream_format.formatValue(request_headers, response_headers, response_trailers, @@ -1131,7 +1132,7 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { ProtoEq(ValueUtil::nullValue())); } { - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + stream_info.downstream_connection_info_provider_->setSslConnection(nullptr); StreamInfoFormatter upstream_format("DOWNSTREAM_PEER_CERT"); EXPECT_EQ(absl::nullopt, upstream_format.format(request_headers, response_headers, response_trailers, stream_info, body)); @@ -1616,7 +1617,7 @@ TEST(SubstitutionFormatterTest, DownstreamPeerCertVStartFormatter) { // No downstreamSslConnection { - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + stream_info.downstream_connection_info_provider_->setSslConnection(nullptr); DownstreamPeerCertVStartFormatter 
cert_start_formart("DOWNSTREAM_PEER_CERT_V_START(%Y/%m/%d)"); EXPECT_EQ(absl::nullopt, cert_start_formart.format(request_headers, response_headers, response_trailers, stream_info, body)); @@ -1629,7 +1630,7 @@ TEST(SubstitutionFormatterTest, DownstreamPeerCertVStartFormatter) { DownstreamPeerCertVStartFormatter cert_start_formart("DOWNSTREAM_PEER_CERT_V_START(%Y/%m/%d)"); auto connection_info = std::make_shared(); EXPECT_CALL(*connection_info, validFromPeerCertificate()).WillRepeatedly(Return(absl::nullopt)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); EXPECT_EQ(absl::nullopt, cert_start_formart.format(request_headers, response_headers, response_trailers, stream_info, body)); EXPECT_THAT(cert_start_formart.formatValue(request_headers, response_headers, response_trailers, @@ -1643,7 +1644,7 @@ TEST(SubstitutionFormatterTest, DownstreamPeerCertVStartFormatter) { time_t test_epoch = 1522280158; SystemTime time = std::chrono::system_clock::from_time_t(test_epoch); EXPECT_CALL(*connection_info, validFromPeerCertificate()).WillRepeatedly(Return(time)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); EXPECT_EQ(AccessLogDateTimeFormatter::fromTime(time), cert_start_format.format(request_headers, response_headers, response_trailers, stream_info, body)); @@ -1656,7 +1657,7 @@ TEST(SubstitutionFormatterTest, DownstreamPeerCertVStartFormatter) { time_t test_epoch = 1522280158; SystemTime time = std::chrono::system_clock::from_time_t(test_epoch); EXPECT_CALL(*connection_info, validFromPeerCertificate()).WillRepeatedly(Return(time)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); 
EXPECT_EQ("Mar 28 23:35:58 2018 UTC", cert_start_format.format(request_headers, response_headers, response_trailers, stream_info, body)); @@ -1672,7 +1673,7 @@ TEST(SubstitutionFormatterTest, DownstreamPeerCertVEndFormatter) { // No downstreamSslConnection { - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + stream_info.downstream_connection_info_provider_->setSslConnection(nullptr); DownstreamPeerCertVEndFormatter cert_end_format("DOWNSTREAM_PEER_CERT_V_END(%Y/%m/%d)"); EXPECT_EQ(absl::nullopt, cert_end_format.format(request_headers, response_headers, response_trailers, stream_info, body)); @@ -1686,7 +1687,7 @@ TEST(SubstitutionFormatterTest, DownstreamPeerCertVEndFormatter) { auto connection_info = std::make_shared(); EXPECT_CALL(*connection_info, expirationPeerCertificate()) .WillRepeatedly(Return(absl::nullopt)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); EXPECT_EQ(absl::nullopt, cert_end_format.format(request_headers, response_headers, response_trailers, stream_info, body)); EXPECT_THAT(cert_end_format.formatValue(request_headers, response_headers, response_trailers, @@ -1700,7 +1701,7 @@ TEST(SubstitutionFormatterTest, DownstreamPeerCertVEndFormatter) { time_t test_epoch = 1522280158; SystemTime time = std::chrono::system_clock::from_time_t(test_epoch); EXPECT_CALL(*connection_info, expirationPeerCertificate()).WillRepeatedly(Return(time)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); EXPECT_EQ(AccessLogDateTimeFormatter::fromTime(time), cert_end_format.format(request_headers, response_headers, response_trailers, stream_info, body)); @@ -1713,7 +1714,7 @@ TEST(SubstitutionFormatterTest, DownstreamPeerCertVEndFormatter) { time_t test_epoch = 
1522280158; SystemTime time = std::chrono::system_clock::from_time_t(test_epoch); EXPECT_CALL(*connection_info, expirationPeerCertificate()).WillRepeatedly(Return(time)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); EXPECT_EQ("Mar 28 23:35:58 2018 UTC", cert_end_format.format(request_headers, response_headers, response_trailers, stream_info, body)); @@ -2610,7 +2611,7 @@ TEST(SubstitutionFormatterTest, JsonFormatterTest) { } TEST(SubstitutionFormatterTest, CompositeFormatterSuccess) { - StreamInfo::MockStreamInfo stream_info; + NiceMock stream_info; Http::TestRequestHeaderMapImpl request_header{{"first", "GET"}, {":path", "/"}}; Http::TestResponseHeaderMapImpl response_header{{"second", "PUT"}, {"test", "test"}}; Http::TestResponseTrailerMapImpl response_trailer{{"third", "POST"}, {"test-2", "test-2"}}; @@ -2708,7 +2709,7 @@ TEST(SubstitutionFormatterTest, CompositeFormatterSuccess) { auto connection_info = std::make_shared(); SystemTime time = std::chrono::system_clock::from_time_t(expected_time_in_epoch); EXPECT_CALL(*connection_info, validFromPeerCertificate()).WillRepeatedly(Return(time)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); FormatterImpl formatter(format, false); EXPECT_EQ( @@ -2728,7 +2729,7 @@ TEST(SubstitutionFormatterTest, CompositeFormatterSuccess) { auto connection_info = std::make_shared(); SystemTime time = std::chrono::system_clock::from_time_t(expected_time_in_epoch); EXPECT_CALL(*connection_info, expirationPeerCertificate()).WillRepeatedly(Return(time)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); FormatterImpl formatter(format, false); 
EXPECT_EQ( diff --git a/test/common/grpc/async_client_impl_test.cc b/test/common/grpc/async_client_impl_test.cc index 0f67ac316d542..f30efdd16d953 100644 --- a/test/common/grpc/async_client_impl_test.cc +++ b/test/common/grpc/async_client_impl_test.cc @@ -27,7 +27,7 @@ class EnvoyAsyncClientImplTest : public testing::Test { public: EnvoyAsyncClientImplTest() : method_descriptor_(helloworld::Greeter::descriptor()->FindMethodByName("SayHello")) { - envoy::config::core::v3::GrpcService config; + config.mutable_envoy_grpc()->set_cluster_name("test_cluster"); auto& initial_metadata_entry = *config.mutable_initial_metadata()->Add(); @@ -39,6 +39,7 @@ class EnvoyAsyncClientImplTest : public testing::Test { ON_CALL(cm_.thread_local_cluster_, httpAsyncClient()).WillByDefault(ReturnRef(http_client_)); } + envoy::config::core::v3::GrpcService config; const Protobuf::MethodDescriptor* method_descriptor_; NiceMock http_client_; NiceMock cm_; @@ -46,6 +47,18 @@ class EnvoyAsyncClientImplTest : public testing::Test { DangerousDeprecatedTestTime test_time_; }; +TEST_F(EnvoyAsyncClientImplTest, ThreadSafe) { + NiceMock> grpc_callbacks; + + Thread::ThreadPtr thread = Thread::threadFactoryForTest().createThread([&]() { + // Verify that using the grpc client in a different thread cause assertion failure. + EXPECT_DEBUG_DEATH(grpc_client_->start(*method_descriptor_, grpc_callbacks, + Http::AsyncClient::StreamOptions()), + "isThreadSafe"); + }); + thread->join(); +} + // Validate that the host header is the cluster name in grpc config. TEST_F(EnvoyAsyncClientImplTest, HostIsClusterNameByDefault) { NiceMock> grpc_callbacks; @@ -129,9 +142,9 @@ TEST_F(EnvoyAsyncClientImplTest, MetadataIsInitialized) { .WillOnce(Invoke([&http_callbacks](Http::HeaderMap&, bool) { http_callbacks->onReset(); })); // Prepare the parent context of this call. 
- auto address_provider = std::make_shared( + auto connection_info_provider = std::make_shared( std::make_shared(expected_downstream_local_address), nullptr); - StreamInfo::StreamInfoImpl stream_info{test_time_.timeSystem(), address_provider}; + StreamInfo::StreamInfoImpl stream_info{test_time_.timeSystem(), connection_info_provider}; Http::AsyncClient::ParentContext parent_context{&stream_info}; Http::AsyncClient::StreamOptions stream_options; diff --git a/test/common/grpc/google_async_client_impl_test.cc b/test/common/grpc/google_async_client_impl_test.cc index 977d734e2fff3..e0eb023f3ef1a 100644 --- a/test/common/grpc/google_async_client_impl_test.cc +++ b/test/common/grpc/google_async_client_impl_test.cc @@ -45,7 +45,7 @@ class MockStubFactory : public GoogleStubFactory { return shared_stub_; } - MockGenericStub* stub_ = new MockGenericStub(); + NiceMock* stub_ = new NiceMock(); GoogleStubSharedPtr shared_stub_{stub_}; }; @@ -86,6 +86,20 @@ class EnvoyGoogleAsyncClientImplTest : public testing::Test { AsyncClient grpc_client_; }; +// Verify that grpc client check for thread consistency. +TEST_F(EnvoyGoogleAsyncClientImplTest, ThreadSafe) { + initialize(); + ON_CALL(*stub_factory_.stub_, PrepareCall_(_, _, _)).WillByDefault(Return(nullptr)); + Thread::ThreadPtr thread = Thread::threadFactoryForTest().createThread([&]() { + NiceMock> grpc_callbacks; + // Verify that using the grpc client in a different thread cause assertion failure. + EXPECT_DEBUG_DEATH(grpc_client_->start(*method_descriptor_, grpc_callbacks, + Http::AsyncClient::StreamOptions()), + "isThreadSafe"); + }); + thread->join(); +} + // Validate that a failure in gRPC stub call creation returns immediately with // status UNAVAILABLE. 
TEST_F(EnvoyGoogleAsyncClientImplTest, StreamHttpStartFail) { @@ -121,9 +135,9 @@ TEST_F(EnvoyGoogleAsyncClientImplTest, MetadataIsInitialized) { EXPECT_CALL(grpc_callbacks, onRemoteClose(Status::WellKnownGrpcStatus::Unavailable, "")); // Prepare the parent context of this call. - auto address_provider = std::make_shared( + auto connection_info_provider = std::make_shared( std::make_shared(expected_downstream_local_address), nullptr); - StreamInfo::StreamInfoImpl stream_info{test_time_.timeSystem(), address_provider}; + StreamInfo::StreamInfoImpl stream_info{test_time_.timeSystem(), connection_info_provider}; Http::AsyncClient::ParentContext parent_context{&stream_info}; Http::AsyncClient::StreamOptions stream_options; diff --git a/test/common/grpc/grpc_client_integration.h b/test/common/grpc/grpc_client_integration.h index 0c559cf699242..00aebe7cefdff 100644 --- a/test/common/grpc/grpc_client_integration.h +++ b/test/common/grpc/grpc_client_integration.h @@ -57,24 +57,6 @@ class GrpcClientIntegrationParamTest ClientType clientType() const override { return std::get<1>(GetParam()); } }; -class VersionedGrpcClientIntegrationParamTest - : public BaseGrpcClientIntegrationParamTest, - public testing::TestWithParam> { -public: - static std::string protocolTestParamsToString( - const ::testing::TestParamInfo>& p) { - return fmt::format("{}_{}_{}", - std::get<0>(p.param) == Network::Address::IpVersion::v4 ? "IPv4" : "IPv6", - std::get<1>(p.param) == ClientType::GoogleGrpc ? 
"GoogleGrpc" : "EnvoyGrpc", - ApiVersion_Name(std::get<2>(p.param))); - } - Network::Address::IpVersion ipVersion() const override { return std::get<0>(GetParam()); } - ClientType clientType() const override { return std::get<1>(GetParam()); } - envoy::config::core::v3::ApiVersion apiVersion() const { return std::get<2>(GetParam()); } -}; - class DeltaSotwIntegrationParamTest : public BaseGrpcClientIntegrationParamTest, public testing::TestWithParam< @@ -106,26 +88,9 @@ class DeltaSotwIntegrationParamTest return; \ } -// For VersionedGrpcClientIntegrationParamTest, skip when testing with -// ENVOY_DISABLE_DEPRECATED_FEATURES. -#ifdef ENVOY_DISABLE_DEPRECATED_FEATURES -#define XDS_DEPRECATED_FEATURE_TEST_SKIP \ - if (apiVersion() != envoy::config::core::v3::ApiVersion::V3) { \ - return; \ - } -#else -#define XDS_DEPRECATED_FEATURE_TEST_SKIP -#endif // ENVOY_DISABLE_DEPRECATED_FEATURES - #define GRPC_CLIENT_INTEGRATION_PARAMS \ testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), \ testing::ValuesIn(TestEnvironment::getsGrpcVersionsForTest())) -#define VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS \ - testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), \ - testing::ValuesIn(TestEnvironment::getsGrpcVersionsForTest()), \ - testing::Values(envoy::config::core::v3::ApiVersion::V3, \ - envoy::config::core::v3::ApiVersion::V2, \ - envoy::config::core::v3::ApiVersion::AUTO)) #define DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS \ testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), \ testing::ValuesIn(TestEnvironment::getsGrpcVersionsForTest()), \ diff --git a/test/common/grpc/grpc_client_integration_test_harness.h b/test/common/grpc/grpc_client_integration_test_harness.h index 619c2b4d13cb2..852bfb3f24256 100644 --- a/test/common/grpc/grpc_client_integration_test_harness.h +++ b/test/common/grpc/grpc_client_integration_test_harness.h @@ -148,6 +148,7 @@ class HelloworldStream : public MockAsyncStreamCallbacks 
reply_headers->addReference(value.first, value.second); } expectInitialMetadata(metadata); + fake_stream_->startGrpcStream(false); fake_stream_->encodeHeaders(Http::TestResponseHeaderMapImpl(*reply_headers), false); } diff --git a/test/common/http/BUILD b/test/common/http/BUILD index df4b5763d2490..edd7e75664edf 100644 --- a/test/common/http/BUILD +++ b/test/common/http/BUILD @@ -319,6 +319,15 @@ envoy_cc_fuzz_test( ], ) +envoy_cc_test( + name = "inline_cookie_test", + srcs = ["inline_cookie_test.cc"], + deps = [ + "//source/common/http:header_map_lib", + "//test/mocks/runtime:runtime_mocks", + ], +) + envoy_cc_test( name = "header_utility_test", srcs = ["header_utility_test.cc"], diff --git a/test/common/http/async_client_impl_test.cc b/test/common/http/async_client_impl_test.cc index 961aff304c181..5728e0fa31ada 100644 --- a/test/common/http/async_client_impl_test.cc +++ b/test/common/http/async_client_impl_test.cc @@ -1572,7 +1572,6 @@ TEST_F(AsyncClientImplUnitTest, NullRouteImplInitTest) { EXPECT_EQ(nullptr, route_impl_->decorator()); EXPECT_EQ(nullptr, route_impl_->tracingConfig()); - EXPECT_EQ(nullptr, route_impl_->perFilterConfig("")); EXPECT_EQ(Code::InternalServerError, route_entry.clusterNotFoundResponseCode()); EXPECT_EQ(nullptr, route_entry.corsPolicy()); EXPECT_EQ(nullptr, route_entry.hashPolicy()); @@ -1586,15 +1585,12 @@ TEST_F(AsyncClientImplUnitTest, NullRouteImplInitTest) { EXPECT_EQ(absl::nullopt, route_entry.grpcTimeoutOffset()); EXPECT_TRUE(route_entry.opaqueConfig().empty()); EXPECT_TRUE(route_entry.includeVirtualHostRateLimits()); - EXPECT_TRUE(route_entry.metadata().filter_metadata().empty()); - EXPECT_EQ(nullptr, route_entry.typedMetadata().get("bar")); - EXPECT_EQ(nullptr, route_entry.perFilterConfig("bar")); + EXPECT_EQ(nullptr, route_impl_->typedMetadata().get("bar")); EXPECT_TRUE(route_entry.upgradeMap().empty()); EXPECT_EQ(false, route_entry.internalRedirectPolicy().enabled()); EXPECT_TRUE(route_entry.shadowPolicies().empty()); 
EXPECT_TRUE(route_entry.virtualHost().rateLimitPolicy().empty()); EXPECT_EQ(nullptr, route_entry.virtualHost().corsPolicy()); - EXPECT_EQ(nullptr, route_entry.virtualHost().perFilterConfig("bar")); EXPECT_FALSE(route_entry.virtualHost().includeAttemptCountInRequest()); EXPECT_FALSE(route_entry.virtualHost().includeAttemptCountInResponse()); EXPECT_FALSE(route_entry.virtualHost().routeConfig().usesVhds()); diff --git a/test/common/http/codec_client_test.cc b/test/common/http/codec_client_test.cc index 202fbc7ab71ea..87fecd8ebe183 100644 --- a/test/common/http/codec_client_test.cc +++ b/test/common/http/codec_client_test.cc @@ -280,17 +280,6 @@ TEST_F(CodecClientTest, WatermarkPassthrough) { connection_cb_->onBelowWriteBufferLowWatermark(); } -TEST_F(CodecClientTest, SSLConnectionInfo) { - initialize(); - std::string session_id = "D62A523A65695219D46FE1FFE285A4C371425ACE421B110B5B8D11D3EB4D5F0B"; - auto connection_info = std::make_shared>(); - ON_CALL(*connection_info, sessionId()).WillByDefault(ReturnRef(session_id)); - EXPECT_CALL(*connection_, ssl()).WillRepeatedly(Return(connection_info)); - connection_cb_->onEvent(Network::ConnectionEvent::Connected); - EXPECT_NE(nullptr, stream_info_.downstreamSslConnection()); - EXPECT_EQ(session_id, stream_info_.downstreamSslConnection()->sessionId()); -} - // Test the codec getting input from a real TCP connection. 
class CodecNetworkTest : public Event::TestUsingSimulatedTime, public testing::TestWithParam { @@ -300,7 +289,7 @@ class CodecNetworkTest : public Event::TestUsingSimulatedTime, auto socket = std::make_shared( Network::Test::getCanonicalLoopbackAddress(GetParam())); Network::ClientConnectionPtr client_connection = dispatcher_->createClientConnection( - socket->addressProvider().localAddress(), source_address_, + socket->connectionInfoProvider().localAddress(), source_address_, Network::Test::createRawBufferSocket(), nullptr); upstream_listener_ = dispatcher_->createListener(std::move(socket), listener_callbacks_, true); client_connection_ = client_connection.get(); diff --git a/test/common/http/conn_manager_impl_fuzz_test.cc b/test/common/http/conn_manager_impl_fuzz_test.cc index 484389ab62360..35aa7dd8ad977 100644 --- a/test/common/http/conn_manager_impl_fuzz_test.cc +++ b/test/common/http/conn_manager_impl_fuzz_test.cc @@ -583,9 +583,9 @@ DEFINE_PROTO_FUZZER(const test::common::http::ConnManagerImplTestCase& input) { ON_CALL(Const(filter_callbacks.connection_), ssl()).WillByDefault(Return(ssl_connection)); ON_CALL(filter_callbacks.connection_, close(_)) .WillByDefault(InvokeWithoutArgs([&connection_alive] { connection_alive = false; })); - filter_callbacks.connection_.stream_info_.downstream_address_provider_->setLocalAddress( + filter_callbacks.connection_.stream_info_.downstream_connection_info_provider_->setLocalAddress( std::make_shared("127.0.0.1")); - filter_callbacks.connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + filter_callbacks.connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("0.0.0.0")); ConnectionManagerImpl conn_manager(config, drain_close, random, http_context, runtime, local_info, diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index c1fe56f0fbede..05a3a0d59d055 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ 
b/test/common/http/conn_manager_impl_test.cc @@ -301,7 +301,7 @@ TEST_F(HttpConnectionManagerImplTest, PopulateStreamInfo) { decoder_ = &conn_manager_->newStream(response_encoder_); EXPECT_EQ(requestIDExtension().get(), decoder_->streamInfo().getRequestIDProvider()); - EXPECT_EQ(ssl_connection_, decoder_->streamInfo().downstreamSslConnection()); + EXPECT_EQ(ssl_connection_, decoder_->streamInfo().downstreamAddressProvider().sslConnection()); EXPECT_EQ(filter_callbacks_.connection_.id_, decoder_->streamInfo().downstreamAddressProvider().connectionID().value()); EXPECT_EQ(server_name_, decoder_->streamInfo().downstreamAddressProvider().requestedServerName()); @@ -697,8 +697,7 @@ TEST_F(HttpConnectionManagerImplTest, RouteOverride) { EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { EXPECT_EQ(default_route, decoder_filters_[0]->callbacks_->route()); - EXPECT_EQ(default_route->routeEntry(), - decoder_filters_[0]->callbacks_->streamInfo().routeEntry()); + EXPECT_EQ(default_route, decoder_filters_[0]->callbacks_->streamInfo().route()); EXPECT_EQ(default_cluster->info(), decoder_filters_[0]->callbacks_->clusterInfo()); // Not clearing cached route returns cached route and doesn't invoke cb. 
@@ -749,8 +748,7 @@ TEST_F(HttpConnectionManagerImplTest, RouteOverride) { EXPECT_EQ(default_route, route); EXPECT_EQ(default_route, decoder_filters_[0]->callbacks_->route()); - EXPECT_EQ(default_route->routeEntry(), - decoder_filters_[0]->callbacks_->streamInfo().routeEntry()); + EXPECT_EQ(default_route, decoder_filters_[0]->callbacks_->streamInfo().route()); EXPECT_EQ(default_cluster->info(), decoder_filters_[0]->callbacks_->clusterInfo()); return FilterHeadersStatus::Continue; @@ -778,8 +776,7 @@ TEST_F(HttpConnectionManagerImplTest, RouteOverride) { EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, true)) .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { EXPECT_EQ(default_route, decoder_filters_[1]->callbacks_->route()); - EXPECT_EQ(default_route->routeEntry(), - decoder_filters_[1]->callbacks_->streamInfo().routeEntry()); + EXPECT_EQ(default_route, decoder_filters_[1]->callbacks_->streamInfo().route()); EXPECT_EQ(default_cluster->info(), decoder_filters_[1]->callbacks_->clusterInfo()); int ctr = 0; @@ -807,8 +804,7 @@ TEST_F(HttpConnectionManagerImplTest, RouteOverride) { decoder_filters_[1]->callbacks_->route(cb); EXPECT_EQ(foo_bar_route, decoder_filters_[1]->callbacks_->route()); - EXPECT_EQ(foo_bar_route->routeEntry(), - decoder_filters_[1]->callbacks_->streamInfo().routeEntry()); + EXPECT_EQ(foo_bar_route, decoder_filters_[1]->callbacks_->streamInfo().route()); EXPECT_EQ(foo_bar_cluster->info(), decoder_filters_[1]->callbacks_->clusterInfo()); return FilterHeadersStatus::Continue; @@ -886,8 +882,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterSetRouteToDelegatingRouteWithCluster EXPECT_EQ(default_route, decoder_filters_[0]->callbacks_->route()); EXPECT_EQ(default_cluster_name, decoder_filters_[0]->callbacks_->route()->routeEntry()->clusterName()); - EXPECT_EQ(default_route->routeEntry(), - decoder_filters_[0]->callbacks_->streamInfo().routeEntry()); + EXPECT_EQ(default_route, decoder_filters_[0]->callbacks_->streamInfo().route()); 
EXPECT_EQ(default_cluster->info(), decoder_filters_[0]->callbacks_->clusterInfo()); // Instantiate a DelegatingRoute child class object and invoke setRoute from @@ -911,8 +906,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterSetRouteToDelegatingRouteWithCluster // clusterName() method. EXPECT_EQ(foo_cluster_name, decoder_filters_[1]->callbacks_->route()->routeEntry()->clusterName()); - EXPECT_EQ(foo_route_override->routeEntry(), - decoder_filters_[1]->callbacks_->streamInfo().routeEntry()); + EXPECT_EQ(foo_route_override, decoder_filters_[1]->callbacks_->streamInfo().route()); // Tests that setRoute correctly sets cached_cluster_info_ EXPECT_EQ(foo_cluster->info(), decoder_filters_[1]->callbacks_->clusterInfo()); @@ -1074,12 +1068,8 @@ TEST_F(HttpConnectionManagerImplTest, DelegatingRouteEntryAllCalls) { EXPECT_EQ(default_route->routeEntry()->includeVirtualHostRateLimits(), delegating_route_foo->routeEntry()->includeVirtualHostRateLimits()); - // NOTE: no coverage for routeEntry()->typedMetadata() // "The mock function has no default action set, and its return type has no default value // set" - - EXPECT_EQ(default_route->routeEntry()->metadata().filter_metadata().size(), - delegating_route_foo->routeEntry()->metadata().filter_metadata().size()); EXPECT_EQ(default_route->routeEntry()->tlsContextMatchCriteria(), delegating_route_foo->routeEntry()->tlsContextMatchCriteria()); @@ -1088,8 +1078,6 @@ TEST_F(HttpConnectionManagerImplTest, DelegatingRouteEntryAllCalls) { EXPECT_EQ(default_route->routeEntry()->pathMatchCriterion().matchType(), delegating_route_foo->routeEntry()->pathMatchCriterion().matchType()); - EXPECT_EQ(default_route->routeEntry()->perFilterConfig("bar"), - delegating_route_foo->routeEntry()->perFilterConfig("bar")); EXPECT_EQ(default_route->routeEntry()->includeAttemptCountInRequest(), delegating_route_foo->routeEntry()->includeAttemptCountInRequest()); EXPECT_EQ(default_route->routeEntry()->includeAttemptCountInResponse(), @@ -1962,7 +1950,7 @@ 
TEST_F(HttpConnectionManagerImplTest, TestAccessLog) { EXPECT_NE(nullptr, stream_info.downstreamAddressProvider().localAddress()); EXPECT_NE(nullptr, stream_info.downstreamAddressProvider().remoteAddress()); EXPECT_NE(nullptr, stream_info.downstreamAddressProvider().directRemoteAddress()); - EXPECT_NE(nullptr, stream_info.routeEntry()); + EXPECT_NE(nullptr, stream_info.route()); EXPECT_EQ(stream_info.downstreamAddressProvider().remoteAddress()->ip()->addressAsString(), xff_address); @@ -2103,7 +2091,7 @@ TEST_F(HttpConnectionManagerImplTest, TestAccessLogWithTrailers) { EXPECT_NE(nullptr, stream_info.downstreamAddressProvider().localAddress()); EXPECT_NE(nullptr, stream_info.downstreamAddressProvider().remoteAddress()); EXPECT_NE(nullptr, stream_info.downstreamAddressProvider().directRemoteAddress()); - EXPECT_NE(nullptr, stream_info.routeEntry()); + EXPECT_NE(nullptr, stream_info.route()); })); EXPECT_CALL(*codec_, dispatch(_)) @@ -2153,7 +2141,7 @@ TEST_F(HttpConnectionManagerImplTest, TestAccessLogWithInvalidRequest) { EXPECT_NE(nullptr, stream_info.downstreamAddressProvider().localAddress()); EXPECT_NE(nullptr, stream_info.downstreamAddressProvider().remoteAddress()); EXPECT_NE(nullptr, stream_info.downstreamAddressProvider().directRemoteAddress()); - EXPECT_EQ(nullptr, stream_info.routeEntry()); + EXPECT_EQ(nullptr, stream_info.route()); })); EXPECT_CALL(*codec_, dispatch(_)) @@ -2251,8 +2239,8 @@ TEST_F(HttpConnectionManagerImplTest, TestAccessLogSsl) { EXPECT_NE(nullptr, stream_info.downstreamAddressProvider().localAddress()); EXPECT_NE(nullptr, stream_info.downstreamAddressProvider().remoteAddress()); EXPECT_NE(nullptr, stream_info.downstreamAddressProvider().directRemoteAddress()); - EXPECT_NE(nullptr, stream_info.downstreamSslConnection()); - EXPECT_NE(nullptr, stream_info.routeEntry()); + EXPECT_NE(nullptr, stream_info.downstreamAddressProvider().sslConnection()); + EXPECT_NE(nullptr, stream_info.route()); })); EXPECT_CALL(*codec_, dispatch(_)) diff 
--git a/test/common/http/conn_manager_impl_test_2.cc b/test/common/http/conn_manager_impl_test_2.cc index 82e09cee54e91..44bd0db57ddec 100644 --- a/test/common/http/conn_manager_impl_test_2.cc +++ b/test/common/http/conn_manager_impl_test_2.cc @@ -1002,7 +1002,7 @@ TEST_F(HttpConnectionManagerImplTest, Filter) { EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { EXPECT_EQ(route1, decoder_filters_[0]->callbacks_->route()); - EXPECT_EQ(route1->routeEntry(), decoder_filters_[0]->callbacks_->streamInfo().routeEntry()); + EXPECT_EQ(route1, decoder_filters_[0]->callbacks_->streamInfo().route()); EXPECT_EQ(fake_cluster1->info(), decoder_filters_[0]->callbacks_->clusterInfo()); decoder_filters_[0]->callbacks_->clearRouteCache(); return FilterHeadersStatus::Continue; @@ -1011,7 +1011,7 @@ TEST_F(HttpConnectionManagerImplTest, Filter) { EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, true)) .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { EXPECT_EQ(route2, decoder_filters_[1]->callbacks_->route()); - EXPECT_EQ(route2->routeEntry(), decoder_filters_[1]->callbacks_->streamInfo().routeEntry()); + EXPECT_EQ(route2, decoder_filters_[1]->callbacks_->streamInfo().route()); // RDS & CDS consistency problem: route2 points to fake_cluster2, which doesn't exist. 
EXPECT_EQ(nullptr, decoder_filters_[1]->callbacks_->clusterInfo()); decoder_filters_[1]->callbacks_->clearRouteCache(); @@ -1022,7 +1022,7 @@ TEST_F(HttpConnectionManagerImplTest, Filter) { .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { EXPECT_EQ(nullptr, decoder_filters_[2]->callbacks_->clusterInfo()); EXPECT_EQ(nullptr, decoder_filters_[2]->callbacks_->route()); - EXPECT_EQ(nullptr, decoder_filters_[2]->callbacks_->streamInfo().routeEntry()); + EXPECT_EQ(nullptr, decoder_filters_[2]->callbacks_->streamInfo().route()); return FilterHeadersStatus::StopIteration; })); EXPECT_CALL(*decoder_filters_[2], decodeComplete()); @@ -1060,7 +1060,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterSetRouteToNullPtr) { EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { EXPECT_EQ(route1, decoder_filters_[0]->callbacks_->route()); - EXPECT_EQ(route1->routeEntry(), decoder_filters_[0]->callbacks_->streamInfo().routeEntry()); + EXPECT_EQ(route1, decoder_filters_[0]->callbacks_->streamInfo().route()); EXPECT_EQ(fake_cluster1->info(), decoder_filters_[0]->callbacks_->clusterInfo()); decoder_filters_[0]->callbacks_->setRoute(nullptr); return FilterHeadersStatus::Continue; @@ -1069,7 +1069,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterSetRouteToNullPtr) { EXPECT_CALL(*decoder_filters_[1], decodeHeaders(_, true)) .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { EXPECT_EQ(nullptr, decoder_filters_[1]->callbacks_->route()); - EXPECT_EQ(nullptr, decoder_filters_[1]->callbacks_->streamInfo().routeEntry()); + EXPECT_EQ(nullptr, decoder_filters_[1]->callbacks_->streamInfo().route()); EXPECT_EQ(nullptr, decoder_filters_[1]->callbacks_->clusterInfo()); return FilterHeadersStatus::StopIteration; })); @@ -2492,7 +2492,7 @@ TEST_F(HttpConnectionManagerImplTest, TestSrdsUpdate) { // Now route config provider returns something. 
EXPECT_EQ(route1, decoder_filters_[0]->callbacks_->route()); - EXPECT_EQ(route1->routeEntry(), decoder_filters_[0]->callbacks_->streamInfo().routeEntry()); + EXPECT_EQ(route1, decoder_filters_[0]->callbacks_->streamInfo().route()); EXPECT_EQ(fake_cluster1->info(), decoder_filters_[0]->callbacks_->clusterInfo()); return FilterHeadersStatus::StopIteration; @@ -2557,7 +2557,7 @@ TEST_F(HttpConnectionManagerImplTest, TestSrdsCrossScopeReroute) { EXPECT_EQ(test_headers.get_("scope_key"), "bar"); // Route now switched to route2 as header "scope_key" has changed. EXPECT_EQ(route2, decoder_filters_[1]->callbacks_->route()); - EXPECT_EQ(route2->routeEntry(), decoder_filters_[1]->callbacks_->streamInfo().routeEntry()); + EXPECT_EQ(route2, decoder_filters_[1]->callbacks_->streamInfo().route()); return FilterHeadersStatus::StopIteration; })); @@ -2599,7 +2599,7 @@ TEST_F(HttpConnectionManagerImplTest, TestSrdsRouteFound) { EXPECT_CALL(*decoder_filters_[0], decodeHeaders(_, true)) .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { EXPECT_EQ(route1, decoder_filters_[0]->callbacks_->route()); - EXPECT_EQ(route1->routeEntry(), decoder_filters_[0]->callbacks_->streamInfo().routeEntry()); + EXPECT_EQ(route1, decoder_filters_[0]->callbacks_->streamInfo().route()); EXPECT_EQ(fake_cluster1->info(), decoder_filters_[0]->callbacks_->clusterInfo()); return FilterHeadersStatus::StopIteration; })); diff --git a/test/common/http/conn_manager_impl_test_base.cc b/test/common/http/conn_manager_impl_test_base.cc index 9f7b81bb97ae1..0ecfafee44fd1 100644 --- a/test/common/http/conn_manager_impl_test_base.cc +++ b/test/common/http/conn_manager_impl_test_base.cc @@ -64,14 +64,16 @@ void HttpConnectionManagerImplTest::setup(bool ssl, const std::string& server_na .WillByDefault([&](auto, auto callback) { return filter_callbacks_.connection_.dispatcher_.createTimer(callback).release(); }); - filter_callbacks_.connection_.stream_info_.downstream_address_provider_->setLocalAddress( + 
filter_callbacks_.connection_.stream_info_.downstream_connection_info_provider_->setLocalAddress( std::make_shared("127.0.0.1", 443)); - filter_callbacks_.connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + filter_callbacks_.connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("0.0.0.0")); - filter_callbacks_.connection_.stream_info_.downstream_address_provider_ + filter_callbacks_.connection_.stream_info_.downstream_connection_info_provider_ ->setDirectRemoteAddressForTest(std::make_shared("0.0.0.0")); - filter_callbacks_.connection_.stream_info_.downstream_address_provider_->setRequestedServerName( - server_name_); + filter_callbacks_.connection_.stream_info_.downstream_connection_info_provider_ + ->setRequestedServerName(server_name_); + filter_callbacks_.connection_.stream_info_.downstream_connection_info_provider_->setSslConnection( + ssl_connection_); conn_manager_ = std::make_unique( *this, drain_close_, random_, http_context_, runtime_, local_info_, cluster_manager_, overload_manager_, test_time_.timeSystem()); diff --git a/test/common/http/conn_manager_utility_test.cc b/test/common/http/conn_manager_utility_test.cc index eed2aec24b99d..0569aa451b056 100644 --- a/test/common/http/conn_manager_utility_test.cc +++ b/test/common/http/conn_manager_utility_test.cc @@ -224,24 +224,68 @@ TEST_F(ConnectionManagerUtilityTest, DetermineNextProtocol) { // Verify external request and XFF is set when we are using remote address and the address is // external. 
TEST_F(ConnectionManagerUtilityTest, UseRemoteAddressWhenNotLocalHostRemoteAddress) { - connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("12.12.12.12")); ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true)); TestRequestHeaderMapImpl headers; EXPECT_EQ((MutateRequestRet{"12.12.12.12:0", false, Tracing::Reason::NotTraceable}), callMutateRequestHeaders(headers, Protocol::Http2)); - EXPECT_EQ(connection_.stream_info_.downstream_address_provider_->remoteAddress() + EXPECT_EQ(connection_.stream_info_.downstream_connection_info_provider_->remoteAddress() ->ip() ->addressAsString(), headers.get_(Headers::get().ForwardedFor)); } +TEST_F(ConnectionManagerUtilityTest, RemoveRefererIfUrlInvalid) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.sanitize_http_header_referer", "true"}}); + + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( + std::make_shared("10.0.0.1")); + ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true)); + TestRequestHeaderMapImpl headers{{"referer", "foo"}}; + EXPECT_EQ((MutateRequestRet{"10.0.0.1:0", true, Tracing::Reason::NotTraceable}), + callMutateRequestHeaders(headers, Protocol::Http2)); + EXPECT_TRUE(headers.get(Http::CustomHeaders::get().Referer).empty()); +} + +TEST_F(ConnectionManagerUtilityTest, RemoveRefererIfMultipleEntriesAreFound) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.sanitize_http_header_referer", "true"}}); + + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( + std::make_shared("10.0.0.1")); + ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true)); + TestRequestHeaderMapImpl headers{{"referer", "https://example.com/"}, + {"referer", "https://google.com/"}}; + 
EXPECT_EQ((MutateRequestRet{"10.0.0.1:0", true, Tracing::Reason::NotTraceable}), + callMutateRequestHeaders(headers, Protocol::Http2)); + EXPECT_TRUE(headers.get(Http::CustomHeaders::get().Referer).empty()); +} + +TEST_F(ConnectionManagerUtilityTest, ValidRefererPassesSanitization) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.sanitize_http_header_referer", "true"}}); + + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( + std::make_shared("10.0.0.1")); + ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true)); + TestRequestHeaderMapImpl headers{{"referer", "https://example.com/"}}; + EXPECT_EQ((MutateRequestRet{"10.0.0.1:0", true, Tracing::Reason::NotTraceable}), + callMutateRequestHeaders(headers, Protocol::Http2)); + EXPECT_EQ("https://example.com/", + headers.get(Http::CustomHeaders::get().Referer)[0]->value().getStringView()); +} + // Verify that we don't append XFF when skipXffAppend(), even if using remote // address and where the address is external. TEST_F(ConnectionManagerUtilityTest, SkipXffAppendUseRemoteAddress) { EXPECT_CALL(config_, skipXffAppend()).WillOnce(Return(true)); - connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("12.12.12.12")); ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true)); TestRequestHeaderMapImpl headers; @@ -255,7 +299,7 @@ TEST_F(ConnectionManagerUtilityTest, SkipXffAppendUseRemoteAddress) { // address and where the address is external. 
TEST_F(ConnectionManagerUtilityTest, SkipXffAppendPassThruUseRemoteAddress) { EXPECT_CALL(config_, skipXffAppend()).WillOnce(Return(true)); - connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("12.12.12.12")); ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true)); TestRequestHeaderMapImpl headers{{"x-forwarded-for", "198.51.100.1"}}; @@ -271,7 +315,7 @@ TEST_F(ConnectionManagerUtilityTest, PreserveForwardedProtoWhenInternal) { ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true)); ON_CALL(config_, xffNumTrustedHops()).WillByDefault(Return(1)); EXPECT_CALL(config_, skipXffAppend()).WillOnce(Return(true)); - connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("12.12.12.12")); ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true)); TestRequestHeaderMapImpl headers{{"x-forwarded-proto", "https"}}; @@ -289,7 +333,7 @@ TEST_F(ConnectionManagerUtilityTest, PreserveForwardedProtoWhenInternal) { TEST_F(ConnectionManagerUtilityTest, OverwriteForwardedProtoWhenExternal) { ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true)); ON_CALL(config_, xffNumTrustedHops()).WillByDefault(Return(0)); - connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("127.0.0.1")); TestRequestHeaderMapImpl headers{{"x-forwarded-proto", "https"}}; Network::Address::Ipv4Instance local_address("10.3.2.1"); @@ -307,7 +351,7 @@ TEST_F(ConnectionManagerUtilityTest, PreserveForwardedProtoWhenInternalButSetSch ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true)); ON_CALL(config_, xffNumTrustedHops()).WillByDefault(Return(1)); EXPECT_CALL(config_, skipXffAppend()).WillOnce(Return(true)); - 
connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("12.12.12.12")); ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true)); TestRequestHeaderMapImpl headers{{"x-forwarded-proto", "foo"}}; @@ -322,7 +366,7 @@ TEST_F(ConnectionManagerUtilityTest, PreserveForwardedProtoWhenInternalButSetSch TEST_F(ConnectionManagerUtilityTest, SchemeIsRespected) { ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true)); ON_CALL(config_, xffNumTrustedHops()).WillByDefault(Return(0)); - connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("127.0.0.1")); TestRequestHeaderMapImpl headers{{"x-forwarded-proto", "https"}, {":scheme", "https"}}; Network::Address::Ipv4Instance local_address("10.3.2.1"); @@ -337,7 +381,7 @@ TEST_F(ConnectionManagerUtilityTest, SchemeIsRespected) { TEST_F(ConnectionManagerUtilityTest, SchemeOverwrite) { ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true)); ON_CALL(config_, xffNumTrustedHops()).WillByDefault(Return(0)); - connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("127.0.0.1")); TestRequestHeaderMapImpl headers{}; Network::Address::Ipv4Instance local_address("10.3.2.1"); @@ -362,7 +406,7 @@ TEST_F(ConnectionManagerUtilityTest, UseRemoteAddressWhenUserConfiguredRemoteAdd ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true)); ON_CALL(config_, localAddress()).WillByDefault(ReturnRef(local_address)); - connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("12.12.12.12")); TestRequestHeaderMapImpl headers; @@ -373,7 +417,7 @@ 
TEST_F(ConnectionManagerUtilityTest, UseRemoteAddressWhenUserConfiguredRemoteAdd // Verify internal request and XFF is set when we are using remote address the address is internal. TEST_F(ConnectionManagerUtilityTest, UseRemoteAddressWhenLocalHostRemoteAddress) { - connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("127.0.0.1")); Network::Address::Ipv4Instance local_address("10.3.2.1"); ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true)); @@ -387,7 +431,7 @@ TEST_F(ConnectionManagerUtilityTest, UseRemoteAddressWhenLocalHostRemoteAddress) // Verify that we trust Nth address from XFF when using remote address with xff_num_trusted_hops. TEST_F(ConnectionManagerUtilityTest, UseRemoteAddressWithXFFTrustedHops) { - connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("203.0.113.128")); ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true)); ON_CALL(config_, xffNumTrustedHops()).WillByDefault(Return(1)); @@ -404,7 +448,7 @@ TEST_F(ConnectionManagerUtilityTest, UseXFFTrustedHopsWithoutRemoteAddress) { detection_extensions_.push_back(getXFFExtension(1)); ON_CALL(config_, originalIpDetectionExtensions()).WillByDefault(ReturnRef(detection_extensions_)); - connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("127.0.0.1")); ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(false)); ON_CALL(config_, xffNumTrustedHops()).WillByDefault(Return(1)); @@ -433,7 +477,7 @@ TEST_F(ConnectionManagerUtilityTest, PreserveHopByHop) { // Verify that we don't set the via header on requests/responses when empty. 
TEST_F(ConnectionManagerUtilityTest, ViaEmpty) { - connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("10.0.0.1")); ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true)); @@ -451,7 +495,7 @@ TEST_F(ConnectionManagerUtilityTest, ViaEmpty) { // Verify that we append a non-empty via header on requests/responses. TEST_F(ConnectionManagerUtilityTest, ViaAppend) { via_ = "foo"; - connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("10.0.0.1")); ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true)); @@ -471,7 +515,7 @@ TEST_F(ConnectionManagerUtilityTest, ViaAppend) { // Verify that we don't set user agent when it is already set. TEST_F(ConnectionManagerUtilityTest, UserAgentDontSet) { - connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("10.0.0.1")); ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true)); TestRequestHeaderMapImpl headers{{"user-agent", "foo"}}; @@ -485,7 +529,7 @@ TEST_F(ConnectionManagerUtilityTest, UserAgentDontSet) { // Verify that we do set user agent when it is empty. TEST_F(ConnectionManagerUtilityTest, UserAgentSetWhenIncomingEmpty) { - connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("10.0.0.1")); ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true)); ON_CALL(local_info_, nodeName()).WillByDefault(ReturnRef(canary_node_)); @@ -540,7 +584,7 @@ TEST_F(ConnectionManagerUtilityTest, InternalServiceForceTrace) { // Test generating request-id in various edge request scenarios. 
TEST_F(ConnectionManagerUtilityTest, EdgeRequestRegenerateRequestIdAndWipeDownstream) { - connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("34.0.0.1")); ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true)); ON_CALL(runtime_.snapshot_, featureEnabled("tracing.global_enabled", @@ -617,7 +661,7 @@ TEST_F(ConnectionManagerUtilityTest, ExternalRequestPreserveRequestIdAndDownstre // Verify that we don't overwrite user agent, but do set x-envoy-downstream-service-cluster // correctly. TEST_F(ConnectionManagerUtilityTest, UserAgentSetIncomingUserAgent) { - connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("10.0.0.1")); ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true)); @@ -635,7 +679,7 @@ TEST_F(ConnectionManagerUtilityTest, UserAgentSetIncomingUserAgent) { // Verify that we set both user agent and x-envoy-downstream-service-cluster. TEST_F(ConnectionManagerUtilityTest, UserAgentSetNoIncomingUserAgent) { - connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("10.0.0.1")); ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true)); user_agent_ = "bar"; @@ -673,7 +717,7 @@ TEST_F(ConnectionManagerUtilityTest, RequestIdGeneratedWhenItsNotPresent) { // Make sure we do not overwrite x-request-id if the request is internal. 
TEST_F(ConnectionManagerUtilityTest, DoNotOverrideRequestIdIfPresentWhenInternalRequest) { - connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("10.0.0.1")); ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true)); TestRequestHeaderMapImpl headers{{"x-request-id", "original_request_id"}}; @@ -686,7 +730,7 @@ TEST_F(ConnectionManagerUtilityTest, DoNotOverrideRequestIdIfPresentWhenInternal // Make sure that we do overwrite x-request-id for "edge" external requests. TEST_F(ConnectionManagerUtilityTest, OverrideRequestIdForExternalRequests) { - connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("134.2.2.11")); ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true)); TestRequestHeaderMapImpl headers{{"x-request-id", "original"}}; @@ -700,7 +744,7 @@ TEST_F(ConnectionManagerUtilityTest, OverrideRequestIdForExternalRequests) { // A request that uses remote address and is from an external address should be treated as an // external request with all internal only headers cleaned. TEST_F(ConnectionManagerUtilityTest, ExternalAddressExternalRequestUseRemote) { - connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("50.0.0.1")); ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true)); route_config_.internal_only_headers_.push_back(LowerCaseString("custom_header")); @@ -741,7 +785,7 @@ TEST_F(ConnectionManagerUtilityTest, ExternalAddressExternalRequestUseRemote) { // A request that is from an external address, but does not use remote address, should pull the // address from XFF. 
TEST_F(ConnectionManagerUtilityTest, ExternalAddressExternalRequestDontUseRemote) { - connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("60.0.0.2")); ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(false)); TestRequestHeaderMapImpl headers{{"x-envoy-external-address", "60.0.0.1"}, @@ -755,7 +799,7 @@ TEST_F(ConnectionManagerUtilityTest, ExternalAddressExternalRequestDontUseRemote // Verify that if XFF is invalid we fall back to remote address, even if it is a pipe. TEST_F(ConnectionManagerUtilityTest, PipeAddressInvalidXFFtDontUseRemote) { - connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("/blah")); ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(false)); TestRequestHeaderMapImpl headers{{"x-forwarded-for", "blah"}}; @@ -769,7 +813,7 @@ TEST_F(ConnectionManagerUtilityTest, PipeAddressInvalidXFFtDontUseRemote) { // includes only internal addresses. Note that this is legacy behavior. See the comments // in mutateRequestHeaders() for more info. TEST_F(ConnectionManagerUtilityTest, AppendInternalAddressXffNotInternalRequest) { - connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("10.0.0.1")); ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true)); TestRequestHeaderMapImpl headers{{"x-forwarded-for", "10.0.0.2"}}; @@ -782,7 +826,7 @@ TEST_F(ConnectionManagerUtilityTest, AppendInternalAddressXffNotInternalRequest) // A request that is from an internal address and uses remote address should be an internal request. // It should also preserve x-envoy-external-address. 
TEST_F(ConnectionManagerUtilityTest, ExternalAddressInternalRequestUseRemote) { - connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("10.0.0.1")); ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true)); TestRequestHeaderMapImpl headers{{"x-envoy-external-address", "60.0.0.1"}, @@ -1764,7 +1808,7 @@ TEST_F(ConnectionManagerUtilityTest, RedirectAfterAllOtherNormalizations) { // test preserve_external_request_id true does not reset the passed requestId if passed TEST_F(ConnectionManagerUtilityTest, PreserveExternalRequestId) { - connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("134.2.2.11")); ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true)); ON_CALL(config_, preserveExternalRequestId()).WillByDefault(Return(true)); @@ -1780,7 +1824,7 @@ TEST_F(ConnectionManagerUtilityTest, PreserveExternalRequestId) { // test preserve_external_request_id true but generates new request id when not passed TEST_F(ConnectionManagerUtilityTest, PreseverExternalRequestIdNoReqId) { - connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("134.2.2.11")); ON_CALL(config_, useRemoteAddress()).WillByDefault(Return(true)); ON_CALL(config_, preserveExternalRequestId()).WillByDefault(Return(true)); @@ -1817,7 +1861,7 @@ TEST_F(ConnectionManagerUtilityTest, PreserveExternalRequestIdNoEdgeRequestGener // test preserve_external_request_id false edge request generates new request id TEST_F(ConnectionManagerUtilityTest, NoPreserveExternalRequestIdEdgeRequestGenerateRequestId) { ON_CALL(config_, preserveExternalRequestId()).WillByDefault(Return(false)); - 
connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("134.2.2.11")); // with request id @@ -1892,5 +1936,95 @@ TEST_F(ConnectionManagerUtilityTest, OriginalIPDetectionExtension) { } } +TEST_F(ConnectionManagerUtilityTest, RejectPathWithFragmentByDefault) { + TestRequestHeaderMapImpl header_map{{":path", "/foo/bar#boom"}}; + EXPECT_EQ(ConnectionManagerUtility::NormalizePathAction::Reject, + ConnectionManagerUtility::maybeNormalizePath(header_map, config_)); + + TestRequestHeaderMapImpl header_map_just_fragment{{":path", "#boom"}}; + EXPECT_EQ(ConnectionManagerUtility::NormalizePathAction::Reject, + ConnectionManagerUtility::maybeNormalizePath(header_map_just_fragment, config_)); + + // Percent encoded # should not cause rejection + TestRequestHeaderMapImpl header_map_with_percent_23{{":path", "/foo/bar/../%23boom"}}; + EXPECT_EQ(ConnectionManagerUtility::NormalizePathAction::Continue, + ConnectionManagerUtility::maybeNormalizePath(header_map_with_percent_23, config_)); + EXPECT_EQ(header_map_with_percent_23.getPathValue(), "/foo/bar/../%23boom"); + + // With normalization enabled the %23 should not be decoded + ON_CALL(config_, shouldNormalizePath()).WillByDefault(Return(true)); + EXPECT_EQ(ConnectionManagerUtility::NormalizePathAction::Continue, + ConnectionManagerUtility::maybeNormalizePath(header_map_with_percent_23, config_)); + // Path normalization should collapse /../ + EXPECT_EQ(header_map_with_percent_23.getPathValue(), "/foo/%23boom"); +} + +TEST_F(ConnectionManagerUtilityTest, DropFragmentFromPathWithOverride) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.http_reject_path_with_fragment", "false"}}); + + TestRequestHeaderMapImpl header_map{{":path", "/foo/bar#boom"}}; + EXPECT_EQ(ConnectionManagerUtility::NormalizePathAction::Continue, + 
ConnectionManagerUtility::maybeNormalizePath(header_map, config_)); + EXPECT_EQ(header_map.getPathValue(), "/foo/bar"); + + TestRequestHeaderMapImpl header_map_just_fragment{{":path", "#"}}; + EXPECT_EQ(ConnectionManagerUtility::NormalizePathAction::Continue, + ConnectionManagerUtility::maybeNormalizePath(header_map_just_fragment, config_)); + EXPECT_EQ(header_map_just_fragment.getPathValue(), ""); + + TestRequestHeaderMapImpl header_map_just_fragment2{{":path", "/#"}}; + EXPECT_EQ(ConnectionManagerUtility::NormalizePathAction::Continue, + ConnectionManagerUtility::maybeNormalizePath(header_map_just_fragment2, config_)); + EXPECT_EQ(header_map_just_fragment2.getPathValue(), "/"); + + TestRequestHeaderMapImpl header_map_with_empty_fragment{{":path", "/foo/baz/#"}}; + EXPECT_EQ(ConnectionManagerUtility::NormalizePathAction::Continue, + ConnectionManagerUtility::maybeNormalizePath(header_map_with_empty_fragment, config_)); + EXPECT_EQ(header_map_with_empty_fragment.getPathValue(), "/foo/baz/"); + + // Check that normalization does not "see" stripped path + ON_CALL(config_, shouldNormalizePath()).WillByDefault(Return(true)); + TestRequestHeaderMapImpl header_map_with_fragment2{{":path", "/foo/../baz/#fragment"}}; + EXPECT_EQ(ConnectionManagerUtility::NormalizePathAction::Continue, + ConnectionManagerUtility::maybeNormalizePath(header_map_with_fragment2, config_)); + EXPECT_EQ(header_map_with_fragment2.getPathValue(), "/baz/"); +} + +TEST_F(ConnectionManagerUtilityTest, KeepFragmentFromPathWithBothOverrides) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.http_reject_path_with_fragment", "false"}}); + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.http_strip_fragment_from_path_unsafe_if_disabled", "false"}}); + + TestRequestHeaderMapImpl header_map{{":path", "/foo/bar#boom"}}; + EXPECT_EQ(ConnectionManagerUtility::NormalizePathAction::Continue, + 
ConnectionManagerUtility::maybeNormalizePath(header_map, config_)); + EXPECT_EQ(header_map.getPathValue(), "/foo/bar#boom"); + + TestRequestHeaderMapImpl header_map_just_fragment{{":path", "#"}}; + EXPECT_EQ(ConnectionManagerUtility::NormalizePathAction::Continue, + ConnectionManagerUtility::maybeNormalizePath(header_map_just_fragment, config_)); + EXPECT_EQ(header_map_just_fragment.getPathValue(), "#"); + + TestRequestHeaderMapImpl header_map_just_fragment2{{":path", "/#"}}; + EXPECT_EQ(ConnectionManagerUtility::NormalizePathAction::Continue, + ConnectionManagerUtility::maybeNormalizePath(header_map_just_fragment2, config_)); + EXPECT_EQ(header_map_just_fragment2.getPathValue(), "/#"); + + TestRequestHeaderMapImpl header_map_with_empty_fragment{{":path", "/foo/baz/#"}}; + EXPECT_EQ(ConnectionManagerUtility::NormalizePathAction::Continue, + ConnectionManagerUtility::maybeNormalizePath(header_map_with_empty_fragment, config_)); + EXPECT_EQ(header_map_with_empty_fragment.getPathValue(), "/foo/baz/#"); + + ON_CALL(config_, shouldNormalizePath()).WillByDefault(Return(true)); + TestRequestHeaderMapImpl header_map_with_fragment2{{":path", "/foo/../baz/#fragment"}}; + EXPECT_EQ(ConnectionManagerUtility::NormalizePathAction::Continue, + ConnectionManagerUtility::maybeNormalizePath(header_map_with_fragment2, config_)); + EXPECT_EQ(header_map_with_fragment2.getPathValue(), "/baz/%23fragment"); +} + } // namespace Http } // namespace Envoy diff --git a/test/common/http/conn_pool_grid_test.cc b/test/common/http/conn_pool_grid_test.cc index 89affdb9a4d04..908e82bf1282b 100644 --- a/test/common/http/conn_pool_grid_test.cc +++ b/test/common/http/conn_pool_grid_test.cc @@ -125,7 +125,7 @@ class ConnectivityGridTestBase : public Event::TestUsingSimulatedTime, public te void addHttp3AlternateProtocol() { AlternateProtocolsCacheImpl::Origin origin("https", "hostname", 9000); const std::vector protocols = { - {"h3-29", "", origin.port_, simTime().monotonicTime() + Seconds(5)}}; + 
{"h3", "", origin.port_, simTime().monotonicTime() + Seconds(5)}}; alternate_protocols_->setAlternatives(origin, protocols); } diff --git a/test/common/http/filter_manager_test.cc b/test/common/http/filter_manager_test.cc index ed71a44fe5ff4..ebd3d1128455c 100644 --- a/test/common/http/filter_manager_test.cc +++ b/test/common/http/filter_manager_test.cc @@ -536,7 +536,9 @@ TEST_F(FilterManagerTest, MultipleOnLocalReply) { EXPECT_CALL(*decoder_filter, onLocalReply(_)); EXPECT_CALL(*stream_filter, onLocalReply(_)); EXPECT_CALL(*encoder_filter, onLocalReply(_)); - EXPECT_CALL(dispatcher_, trackedObjectStackIsEmpty()); + // trackedObjectStackIsEmpty() is never called since sendLocalReply will abort encoder filter + // iteration. + EXPECT_CALL(dispatcher_, trackedObjectStackIsEmpty()).Times(0); decoder_filter->callbacks_->sendLocalReply(Code::InternalServerError, "body", nullptr, absl::nullopt, "details"); diff --git a/test/common/http/header_map_impl_test.cc b/test/common/http/header_map_impl_test.cc index ab770d1b48b1a..98c35fae521c1 100644 --- a/test/common/http/header_map_impl_test.cc +++ b/test/common/http/header_map_impl_test.cc @@ -1,4 +1,5 @@ #include +#include #include #include @@ -1371,21 +1372,58 @@ TEST_P(HeaderMapImplTest, ValidHeaderString) { } TEST_P(HeaderMapImplTest, HttpTraceContextTest) { + { + TestRequestHeaderMapImpl request_headers; + + // Protocol. + EXPECT_EQ(request_headers.protocol(), ""); + request_headers.addCopy(Http::Headers::get().Protocol, "HTTP/x"); + EXPECT_EQ(request_headers.protocol(), "HTTP/x"); + + // Authority. + EXPECT_EQ(request_headers.authority(), ""); + request_headers.addCopy(Http::Headers::get().Host, "test.com:233"); + EXPECT_EQ(request_headers.authority(), "test.com:233"); + + // Path. + EXPECT_EQ(request_headers.path(), ""); + request_headers.addCopy(Http::Headers::get().Path, "/anything"); + EXPECT_EQ(request_headers.path(), "/anything"); + + // Method. 
+ EXPECT_EQ(request_headers.method(), ""); + request_headers.addCopy(Http::Headers::get().Method, Http::Headers::get().MethodValues.Options); + EXPECT_EQ(request_headers.method(), Http::Headers::get().MethodValues.Options); + } + + { + size_t size = 0; + TestRequestHeaderMapImpl request_headers{{"host", "foo"}, {"bar", "var"}, {"ok", "no"}}; + request_headers.forEach([&size](absl::string_view key, absl::string_view val) { + size += key.size(); + size += val.size(); + return true; + }); + // 'host' will be converted to ':authority'. + EXPECT_EQ(23, size); + EXPECT_EQ(23, request_headers.byteSize()); + } + { TestRequestHeaderMapImpl request_headers{{"host", "foo"}}; - EXPECT_EQ(request_headers.getTraceContext("host").value(), "foo"); + EXPECT_EQ(request_headers.getByKey("host").value(), "foo"); - request_headers.setTraceContext("trace_key", "trace_value"); - EXPECT_EQ(request_headers.getTraceContext("trace_key").value(), "trace_value"); + request_headers.setByKey("trace_key", "trace_value"); + EXPECT_EQ(request_headers.getByKey("trace_key").value(), "trace_value"); std::string trace_ref_key = "trace_ref_key"; - request_headers.setTraceContextReferenceKey(trace_ref_key, "trace_value"); + request_headers.setByReferenceKey(trace_ref_key, "trace_value"); auto* header_entry = request_headers.get(Http::LowerCaseString(trace_ref_key))[0]; EXPECT_EQ(reinterpret_cast(trace_ref_key.data()), reinterpret_cast(header_entry->key().getStringView().data())); std::string trace_ref_value = "trace_ref_key"; - request_headers.setTraceContextReference(trace_ref_key, trace_ref_value); + request_headers.setByReference(trace_ref_key, trace_ref_value); header_entry = request_headers.get(Http::LowerCaseString(trace_ref_key))[0]; EXPECT_EQ(reinterpret_cast(trace_ref_key.data()), reinterpret_cast(header_entry->key().getStringView().data())); diff --git a/test/common/http/http2/codec_impl_test.cc b/test/common/http/http2/codec_impl_test.cc index 6441c7df73300..948cd2ff4853e 100644 --- 
a/test/common/http/http2/codec_impl_test.cc +++ b/test/common/http/http2/codec_impl_test.cc @@ -52,6 +52,9 @@ namespace CommonUtility = ::Envoy::Http2::Utility; class Http2CodecImplTestFixture { public: + static bool slowContainsStreamId(int id, ConnectionImpl& connection) { + return connection.slowContainsStreamId(id); + } // The Http::Connection::dispatch method does not throw (any more). However unit tests in this // file use codecs for sending test data through mock network connections to the codec under test. // It is infeasible to plumb error codes returned by the dispatch() method of the codecs under @@ -406,6 +409,8 @@ TEST_P(Http2CodecImplTest, TrailerStatus) { HttpTestUtility::addDefaultHeaders(request_headers); EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); EXPECT_TRUE(request_encoder_->encodeHeaders(request_headers, true).ok()); + EXPECT_TRUE(Http2CodecImplTestFixture::slowContainsStreamId(1, *client_)); + EXPECT_FALSE(Http2CodecImplTestFixture::slowContainsStreamId(100, *client_)); TestResponseHeaderMapImpl continue_headers{{":status", "100"}}; EXPECT_CALL(response_decoder_, decode100ContinueHeaders_(_)); @@ -1036,7 +1041,7 @@ TEST_P(Http2CodecImplTest, DumpsStreamlessConnectionWithoutAllocatingMemory) { "per_stream_buffer_limit_: 268435456, allow_metadata_: 0, " "stream_error_on_invalid_http_messaging_: 0, is_outbound_flood_monitored_control_frame_: " "0, skip_encoding_empty_trailers_: 1, dispatching_: 0, raised_goaway_: 0, " - "pending_deferred_reset_: 0\n" + "pending_deferred_reset_streams_.size(): 0\n" " &protocol_constraints_: \n" " ProtocolConstraints")); EXPECT_THAT( @@ -1201,7 +1206,7 @@ TEST_P(Http2CodecImplTest, ClientConnectionShouldDumpCorrespondingRequestWithout class Http2CodecImplDeferredResetTest : public Http2CodecImplTest {}; -TEST_P(Http2CodecImplDeferredResetTest, DeferredResetClient) { +TEST_P(Http2CodecImplDeferredResetTest, NoDeferredResetForClientStreams) { initialize(); InSequence s; @@ -1209,67 +1214,145 @@ 
TEST_P(Http2CodecImplDeferredResetTest, DeferredResetClient) { MockStreamCallbacks client_stream_callbacks; request_encoder_->getStream().addCallbacks(client_stream_callbacks); - // Do a request, but pause server dispatch so we don't send window updates. This will result in a - // deferred reset, followed by a pending frames flush which will cause the stream to actually - // be reset immediately since we are outside of dispatch context. + // Encode headers, encode data and send reset stream from the call stack of decodeHeaders in + // order to delay sendPendingFrames processing in those calls until the end of dispatch. The + // call to resetStream goes down the regular reset path for client streams; the pending outbound + // header and data for the reset stream are discarded immediately. + EXPECT_CALL(request_decoder_, decodeData(_, _)).Times(0); ON_CALL(client_connection_, write(_, _)) .WillByDefault( Invoke([&](Buffer::Instance& data, bool) -> void { server_wrapper_.buffer_.add(data); })); TestRequestHeaderMapImpl request_headers; HttpTestUtility::addDefaultHeaders(request_headers); EXPECT_TRUE(request_encoder_->encodeHeaders(request_headers, false).ok()); - Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); - EXPECT_CALL(client_stream_callbacks, onAboveWriteBufferHighWatermark()).Times(AnyNumber()); - request_encoder_->encodeData(body, true); - EXPECT_CALL(client_stream_callbacks, onResetStream(StreamResetReason::LocalReset, _)); - request_encoder_->getStream().resetStream(StreamResetReason::LocalReset); // Dispatch server. We expect to see some data. - EXPECT_CALL(response_decoder_, decodeHeaders_(_, _)).Times(0); EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)).WillOnce(InvokeWithoutArgs([&]() -> void { - // Start a response inside the headers callback. This should not result in the client - // seeing any headers as the stream should already be reset on the other side, even though - // we don't know about it yet. 
TestResponseHeaderMapImpl response_headers{{":status", "200"}}; response_encoder_->encodeHeaders(response_headers, false); })); - EXPECT_CALL(request_decoder_, decodeData(_, false)).Times(AtLeast(1)); - EXPECT_CALL(server_stream_callbacks_, onResetStream(StreamResetReason::RemoteReset, _)); + EXPECT_CALL(response_decoder_, decodeHeaders_(_, _)).WillOnce(InvokeWithoutArgs([&]() -> void { + Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); + EXPECT_CALL(client_stream_callbacks, onAboveWriteBufferHighWatermark()).Times(AnyNumber()); + request_encoder_->encodeData(body, true); + EXPECT_CALL(client_stream_callbacks, onResetStream(StreamResetReason::LocalReset, _)); + EXPECT_CALL(server_stream_callbacks_, onResetStream(StreamResetReason::RemoteReset, _)); + request_encoder_->getStream().resetStream(StreamResetReason::LocalReset); + })); setupDefaultConnectionMocks(); + EXPECT_NE(0, server_wrapper_.buffer_.length()); auto status = server_wrapper_.dispatch(Buffer::OwnedImpl(), *server_); EXPECT_TRUE(status.ok()); + EXPECT_EQ(0, server_wrapper_.buffer_.length()); } -TEST_P(Http2CodecImplDeferredResetTest, DeferredResetServer) { +TEST_P(Http2CodecImplDeferredResetTest, DeferredResetServerIfLocalEndStreamBeforeReset) { initialize(); InSequence s; TestRequestHeaderMapImpl request_headers; HttpTestUtility::addDefaultHeaders(request_headers); - EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)).WillOnce(InvokeWithoutArgs([&]() { + // Encode headers, encode data and send reset stream from the call stack of decodeHeaders in + // order to delay sendPendingFrames processing in those calls until the end of dispatch. The + // delayed sendPendingFrames processing allows us to verify that resetStream calls go down the + // deferred reset path if there are pending data frames with local end_stream set. 
+ ON_CALL(server_connection_, write(_, _)) + .WillByDefault(Invoke( + [&](Buffer::Instance& data, bool) -> void { client_wrapper_.buffer_.add(data); })); + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + response_encoder_->encodeHeaders(response_headers, false); + Buffer::OwnedImpl body(std::string(32 * 1024, 'a')); + EXPECT_CALL(server_stream_callbacks_, onAboveWriteBufferHighWatermark()).Times(AnyNumber()); + auto flush_timer = new Event::MockTimer(&server_connection_.dispatcher_); + EXPECT_CALL(*flush_timer, enableTimer(std::chrono::milliseconds(30000), _)); + response_encoder_->encodeData(body, true); + EXPECT_CALL(server_stream_callbacks_, onResetStream(StreamResetReason::LocalReset, _)); + EXPECT_CALL(*flush_timer, disableTimer()); + response_encoder_->getStream().resetStream(StreamResetReason::LocalReset); + })); EXPECT_TRUE(request_encoder_->encodeHeaders(request_headers, false).ok()); - // In this case we do the same thing as DeferredResetClient but on the server side. 
- ON_CALL(server_connection_, write(_, _)) - .WillByDefault( - Invoke([&](Buffer::Instance& data, bool) -> void { client_wrapper_.buffer_.add(data); })); - TestResponseHeaderMapImpl response_headers{{":status", "200"}}; - response_encoder_->encodeHeaders(response_headers, false); - Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); - EXPECT_CALL(server_stream_callbacks_, onAboveWriteBufferHighWatermark()).Times(AnyNumber()); - auto flush_timer = new NiceMock(&server_connection_.dispatcher_); - EXPECT_CALL(*flush_timer, enableTimer(std::chrono::milliseconds(30000), _)); - response_encoder_->encodeData(body, true); - EXPECT_CALL(server_stream_callbacks_, onResetStream(StreamResetReason::LocalReset, _)); - EXPECT_CALL(*flush_timer, disableTimer()); - response_encoder_->getStream().resetStream(StreamResetReason::LocalReset); + MockStreamCallbacks client_stream_callbacks; + request_encoder_->getStream().addCallbacks(client_stream_callbacks); + EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); + EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AnyNumber()); + EXPECT_CALL(response_decoder_, decodeData(_, true)); + EXPECT_CALL(client_stream_callbacks, onResetStream(StreamResetReason::RemoteReset, _)); + setupDefaultConnectionMocks(); + auto status = client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_); + EXPECT_TRUE(status.ok()); +} + +TEST_P(Http2CodecImplDeferredResetTest, LargeDataDeferredResetServerIfLocalEndStreamBeforeReset) { + initialize(); + + InSequence s; + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)).WillOnce(InvokeWithoutArgs([&]() { + // Encode headers, encode data and send reset stream from the call stack of decodeHeaders in + // order to delay sendPendingFrames processing in those calls until the end of dispatch. 
The + // delayed sendPendingFrames processing allows us to verify that resetStream calls go down the + // deferred reset path if there are pending data frames with local end_stream set. + ON_CALL(server_connection_, write(_, _)) + .WillByDefault(Invoke( + [&](Buffer::Instance& data, bool) -> void { client_wrapper_.buffer_.add(data); })); + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + response_encoder_->encodeHeaders(response_headers, false); + Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); + EXPECT_CALL(server_stream_callbacks_, onAboveWriteBufferHighWatermark()).Times(AnyNumber()); + auto flush_timer = new Event::MockTimer(&server_connection_.dispatcher_); + EXPECT_CALL(*flush_timer, enableTimer(std::chrono::milliseconds(30000), _)); + response_encoder_->encodeData(body, true); + EXPECT_CALL(server_stream_callbacks_, onResetStream(StreamResetReason::LocalReset, _)); + EXPECT_CALL(*flush_timer, disableTimer()); + response_encoder_->getStream().resetStream(StreamResetReason::LocalReset); + })); + EXPECT_TRUE(request_encoder_->encodeHeaders(request_headers, false).ok()); MockStreamCallbacks client_stream_callbacks; request_encoder_->getStream().addCallbacks(client_stream_callbacks); EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)); - EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AtLeast(1)); + EXPECT_CALL(response_decoder_, decodeData(_, false)).Times(AnyNumber()); + EXPECT_CALL(client_stream_callbacks, onResetStream(StreamResetReason::RemoteReset, _)); + setupDefaultConnectionMocks(); + auto status = client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_); + EXPECT_TRUE(status.ok()); +} + +TEST_P(Http2CodecImplDeferredResetTest, NoDeferredResetServerIfResetBeforeLocalEndStream) { + initialize(); + + InSequence s; + + TestRequestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)).WillOnce(InvokeWithoutArgs([&]() { + // Encode 
headers, encode data and send reset stream from the call stack of decodeHeaders in + // order to delay sendPendingFrames processing in those calls until the end of dispatch. The + // call to resetStream goes down the regular reset path since local end_stream is not set; the + // pending outbound header and data for the reset stream are discarded immediately. + ON_CALL(server_connection_, write(_, _)) + .WillByDefault(Invoke( + [&](Buffer::Instance& data, bool) -> void { client_wrapper_.buffer_.add(data); })); + TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + response_encoder_->encodeHeaders(response_headers, false); + Buffer::OwnedImpl body(std::string(1024 * 1024, 'a')); + EXPECT_CALL(server_stream_callbacks_, onAboveWriteBufferHighWatermark()).Times(AnyNumber()); + response_encoder_->encodeData(body, false); + EXPECT_CALL(server_stream_callbacks_, onResetStream(StreamResetReason::LocalReset, _)); + response_encoder_->getStream().resetStream(StreamResetReason::LocalReset); + })); + EXPECT_TRUE(request_encoder_->encodeHeaders(request_headers, false).ok()); + + MockStreamCallbacks client_stream_callbacks; + request_encoder_->getStream().addCallbacks(client_stream_callbacks); + EXPECT_CALL(response_decoder_, decodeHeaders_(_, _)).Times(0); + EXPECT_CALL(response_decoder_, decodeData(_, _)).Times(0); EXPECT_CALL(client_stream_callbacks, onResetStream(StreamResetReason::RemoteReset, _)); setupDefaultConnectionMocks(); auto status = client_wrapper_.dispatch(Buffer::OwnedImpl(), *client_); diff --git a/test/common/http/http2/http2_frame.cc b/test/common/http/http2/http2_frame.cc index 4a1c4b17b7673..2f6dfeacb8785 100644 --- a/test/common/http/http2/http2_frame.cc +++ b/test/common/http/http2/http2_frame.cc @@ -126,8 +126,22 @@ Http2Frame Http2Frame::makePingFrame(absl::string_view data) { } Http2Frame Http2Frame::makeEmptySettingsFrame(SettingsFlags flags) { + return makeSettingsFrame(flags, {}); +} + +Http2Frame 
Http2Frame::makeSettingsFrame(SettingsFlags flags, + std::list> settings) { Http2Frame frame; frame.buildHeader(Type::Settings, 0, static_cast(flags)); + for (auto& item : settings) { + frame.data_.push_back((item.first >> 8) & 0xff); + frame.data_.push_back(item.first & 0xff); + frame.data_.push_back((item.second >> 24) & 0xff); + frame.data_.push_back((item.second >> 16) & 0xff); + frame.data_.push_back((item.second >> 8) & 0xff); + frame.data_.push_back(item.second & 0xff); + } + frame.setPayloadSize(6 * settings.size()); return frame; } diff --git a/test/common/http/http2/http2_frame.h b/test/common/http/http2/http2_frame.h index 54f8fe5491bbf..fbb3f5a96ae12 100644 --- a/test/common/http/http2/http2_frame.h +++ b/test/common/http/http2/http2_frame.h @@ -129,6 +129,8 @@ class Http2Frame { // Methods for creating HTTP2 frames static Http2Frame makePingFrame(absl::string_view data = {}); static Http2Frame makeEmptySettingsFrame(SettingsFlags flags = SettingsFlags::None); + static Http2Frame makeSettingsFrame(SettingsFlags flags, + std::list> settings); static Http2Frame makeEmptyHeadersFrame(uint32_t stream_index, HeadersFlags flags = HeadersFlags::None); static Http2Frame makeHeadersFrameNoStatus(uint32_t stream_index); diff --git a/test/common/http/http2/metadata_encoder_decoder_test.cc b/test/common/http/http2/metadata_encoder_decoder_test.cc index d954778be1719..be7ca1bc7e320 100644 --- a/test/common/http/http2/metadata_encoder_decoder_test.cc +++ b/test/common/http/http2/metadata_encoder_decoder_test.cc @@ -182,6 +182,20 @@ TEST_F(MetadataEncoderDecoderTest, TestMetadataSizeLimit) { cleanUp(); } +TEST_F(MetadataEncoderDecoderTest, TestTotalPayloadSize) { + initialize([](MetadataMapPtr&&) {}); + + const std::string payload = std::string(1024, 'a'); + EXPECT_EQ(0, decoder_->totalPayloadSize()); + EXPECT_TRUE( + decoder_->receiveMetadata(reinterpret_cast(payload.data()), payload.size())); + EXPECT_EQ(payload.size(), decoder_->totalPayloadSize()); + EXPECT_TRUE( 
+ decoder_->receiveMetadata(reinterpret_cast(payload.data()), payload.size())); + EXPECT_EQ(2 * payload.size(), decoder_->totalPayloadSize()); + cleanUp(); +} + TEST_F(MetadataEncoderDecoderTest, TestDecodeBadData) { MetadataMap metadata_map = { {"header_key1", "header_value1"}, diff --git a/test/common/http/inline_cookie_test.cc b/test/common/http/inline_cookie_test.cc new file mode 100644 index 0000000000000..5d8269f81975e --- /dev/null +++ b/test/common/http/inline_cookie_test.cc @@ -0,0 +1,61 @@ +#include "source/common/http/header_map_impl.h" +#include "source/common/http/header_utility.h" + +#include "test/mocks/runtime/mocks.h" +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Http { +namespace { + +// Test that the cookie header can work correctly after being registered as an inline header. The +// test will register the cookie as an inline header. In order to avoid affecting other tests, the +// test is placed in this separate source file. +TEST(InlineCookieTest, InlineCookieTest) { + Http::CustomInlineHeaderRegistry::registerInlineHeader( + Http::Headers::get().Cookie); + Http::CustomInlineHeaderRegistry::registerInlineHeader( + Http::LowerCaseString("header_for_compare")); + + auto mock_snapshot = std::make_shared>(); + testing::NiceMock mock_loader; + Runtime::LoaderSingleton::initialize(&mock_loader); + + { + // Enable 'envoy.reloadable_features.header_map_correctly_coalesce_cookies' feature. + ON_CALL(mock_loader, threadsafeSnapshot()).WillByDefault(testing::Return(mock_snapshot)); + ON_CALL(*mock_snapshot, runtimeFeatureEnabled(_)).WillByDefault(testing::Return(true)); + + Http::TestRequestHeaderMapImpl headers{{"cookie", "key1:value1"}, + {"cookie", "key2:value2"}, + {"header_for_compare", "value1"}, + {"header_for_compare", "value2"}}; + + // Delimiter for inline 'cookie' header is specialized '; '. 
+ EXPECT_EQ("key1:value1; key2:value2", headers.get_("cookie")); + // Delimiter for inline 'header_for_compare' header is default ','. + EXPECT_EQ("value1,value2", headers.get_("header_for_compare")); + } + + { + // Disable 'envoy.reloadable_features.header_map_correctly_coalesce_cookies' feature. + ON_CALL(mock_loader, threadsafeSnapshot()).WillByDefault(testing::Return(mock_snapshot)); + ON_CALL(*mock_snapshot, runtimeFeatureEnabled(_)).WillByDefault(testing::Return(false)); + + Http::TestRequestHeaderMapImpl headers{{"cookie", "key1:value1"}, + {"cookie", "key2:value2"}, + {"header_for_compare", "value1"}, + {"header_for_compare", "value2"}}; + + // 'envoy.reloadable_features.header_map_correctly_coalesce_cookies' is disabled then default + // ',' will be used as delimiter. + EXPECT_EQ("key1:value1,key2:value2", headers.get_("cookie")); + EXPECT_EQ("value1,value2", headers.get_("header_for_compare")); + } +} + +} // namespace +} // namespace Http +} // namespace Envoy diff --git a/test/common/http/match_wrapper/config_test.cc b/test/common/http/match_wrapper/config_test.cc index f89f1a1f458a6..8287d9e5fd2d0 100644 --- a/test/common/http/match_wrapper/config_test.cc +++ b/test/common/http/match_wrapper/config_test.cc @@ -57,7 +57,7 @@ TEST(MatchWrapper, DisabledByDefault) { name: test typed_config: "@type": type.googleapis.com/google.protobuf.StringValue -matcher: +xds_matcher: matcher_tree: input: name: request-headers @@ -90,6 +90,57 @@ TEST(MatchWrapper, WithMatcher) { NiceMock factory_context; + const auto config = + TestUtility::parseYaml(R"EOF( +extension_config: + name: test + typed_config: + "@type": type.googleapis.com/google.protobuf.StringValue +xds_matcher: + matcher_tree: + input: + name: request-headers + typed_config: + "@type": type.googleapis.com/envoy.type.matcher.v3.HttpRequestHeaderMatchInput + header_name: default-matcher-header + exact_match_map: + map: + match: + action: + name: skip + typed_config: + "@type": 
type.googleapis.com/envoy.extensions.filters.common.matcher.action.v3.SkipFilter +)EOF"); + + MatchWrapperConfig match_wrapper_config; + auto cb = match_wrapper_config.createFilterFactoryFromProto(config, "", factory_context); + + Envoy::Http::MockFilterChainFactoryCallbacks factory_callbacks; + testing::InSequence s; + + // This matches the sequence of calls in the filter factory above: the ones that call the overload + // without a match tree has a match tree added, the other one does not. + EXPECT_CALL(factory_callbacks, addStreamDecoderFilter(_, testing::NotNull())); + EXPECT_CALL(factory_callbacks, addStreamEncoderFilter(_, testing::NotNull())); + EXPECT_CALL(factory_callbacks, addStreamFilter(_, testing::NotNull())); + EXPECT_CALL(factory_callbacks, addStreamDecoderFilter(_, testing::IsNull())); + EXPECT_CALL(factory_callbacks, addStreamEncoderFilter(_, testing::IsNull())); + EXPECT_CALL(factory_callbacks, addStreamFilter(_, testing::IsNull())); + EXPECT_CALL(factory_callbacks, addAccessLogHandler(_)); + cb(factory_callbacks); +} + +TEST(MatchWrapper, DEPRECATED_FEATURE_TEST(WithDeprecatedMatcher)) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.experimental_matching_api", "true"}}); + + TestFactory test_factory; + Envoy::Registry::InjectFactory + inject_factory(test_factory); + + NiceMock factory_context; + const auto config = TestUtility::parseYaml(R"EOF( extension_config: @@ -130,6 +181,31 @@ TEST(MatchWrapper, WithMatcher) { cb(factory_callbacks); } +TEST(MatchWrapper, WithNoMatcher) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.experimental_matching_api", "true"}}); + + TestFactory test_factory; + Envoy::Registry::InjectFactory + inject_factory(test_factory); + + NiceMock factory_context; + + const auto config = + TestUtility::parseYaml(R"EOF( +extension_config: + name: test + typed_config: + "@type": 
type.googleapis.com/google.protobuf.StringValue +)EOF"); + + MatchWrapperConfig match_wrapper_config; + EXPECT_THROW_WITH_REGEX( + match_wrapper_config.createFilterFactoryFromProto(config, "", factory_context), + EnvoyException, "one of `matcher` and `matcher_tree` must be set."); +} + TEST(MatchWrapper, WithMatcherInvalidDataInput) { TestScopedRuntime scoped_runtime; Runtime::LoaderSingleton::getExisting()->mergeValues( @@ -147,7 +223,7 @@ TEST(MatchWrapper, WithMatcherInvalidDataInput) { name: test typed_config: "@type": type.googleapis.com/google.protobuf.StringValue -matcher: +xds_matcher: matcher_tree: input: name: request-headers diff --git a/test/common/http/utility_test.cc b/test/common/http/utility_test.cc index 62fcd5f17ff0b..766fd52200b57 100644 --- a/test/common/http/utility_test.cc +++ b/test/common/http/utility_test.cc @@ -310,10 +310,9 @@ TEST(HttpUtility, createSslRedirectPath) { namespace { -envoy::config::core::v3::Http2ProtocolOptions -parseHttp2OptionsFromV3Yaml(const std::string& yaml, bool avoid_boosting = true) { +envoy::config::core::v3::Http2ProtocolOptions parseHttp2OptionsFromV3Yaml(const std::string& yaml) { envoy::config::core::v3::Http2ProtocolOptions http2_options; - TestUtility::loadFromYamlAndValidate(yaml, http2_options, false, avoid_boosting); + TestUtility::loadFromYamlAndValidate(yaml, http2_options); return ::Envoy::Http2::Utility::initializeAndValidateOptions(http2_options); } @@ -549,6 +548,16 @@ TEST(HttpUtility, TestParseCookie) { EXPECT_EQ(value, "abc123"); } +TEST(HttpUtility, TestParseCookieDuplicates) { + TestRequestHeaderMapImpl headers{{"someheader", "10.0.0.1"}, + {"cookie", "a=; b=1; a=2"}, + {"cookie", "a=3; b=2"}, + {"cookie", "b=3"}}; + + EXPECT_EQ(Utility::parseCookieValue(headers, "a"), ""); + EXPECT_EQ(Utility::parseCookieValue(headers, "b"), "1"); +} + TEST(HttpUtility, TestParseSetCookie) { TestRequestHeaderMapImpl headers{ {"someheader", "10.0.0.1"}, @@ -598,6 +607,33 @@ TEST(HttpUtility, 
TestParseCookieWithQuotes) { EXPECT_EQ(Utility::parseCookieValue(headers, "leadingdquote"), "\"foobar"); } +TEST(HttpUtility, TestParseCookies) { + TestRequestHeaderMapImpl headers{ + {"someheader", "10.0.0.1"}, + {"cookie", "dquote=\"; quoteddquote=\"\"\""}, + {"cookie", "leadingdquote=\"foobar;"}, + {"cookie", "abc=def; token=\"abc123\"; Expires=Wed, 09 Jun 2021 10:18:14 GMT"}}; + + const auto& cookies = Utility::parseCookies(headers); + + EXPECT_EQ(cookies.at("token"), "abc123"); + EXPECT_EQ(cookies.at("dquote"), "\""); + EXPECT_EQ(cookies.at("quoteddquote"), "\""); + EXPECT_EQ(cookies.at("leadingdquote"), "\"foobar"); +} + +TEST(HttpUtility, TestParseCookiesDuplicates) { + TestRequestHeaderMapImpl headers{{"someheader", "10.0.0.1"}, + {"cookie", "a=; b=1; a=2"}, + {"cookie", "a=3; b=2"}, + {"cookie", "b=3"}}; + + const auto& cookies = Utility::parseCookies(headers); + + EXPECT_EQ(cookies.at("a"), ""); + EXPECT_EQ(cookies.at("b"), "1"); +} + TEST(HttpUtility, TestParseSetCookieWithQuotes) { TestRequestHeaderMapImpl headers{ {"someheader", "10.0.0.1"}, @@ -892,9 +928,7 @@ TEST(HttpUtility, ResolveMostSpecificPerFilterConfig) { const std::string filter_name = "envoy.filter"; NiceMock filter_callbacks; - const Router::RouteSpecificFilterConfig one; - const Router::RouteSpecificFilterConfig two; - const Router::RouteSpecificFilterConfig three; + const Router::RouteSpecificFilterConfig config; // Test when there's nothing on the route EXPECT_EQ(nullptr, Utility::resolveMostSpecificPerFilterConfig( @@ -902,87 +936,19 @@ TEST(HttpUtility, ResolveMostSpecificPerFilterConfig) { // Testing in reverse order, so that the method always returns the last object. 
// Testing per-virtualhost typed filter config - ON_CALL(filter_callbacks.route_->route_entry_.virtual_host_, perFilterConfig(filter_name)) - .WillByDefault(Return(&one)); - EXPECT_EQ(&one, Utility::resolveMostSpecificPerFilterConfig( - filter_name, filter_callbacks.route())); - - // Testing per-route typed filter config - ON_CALL(*filter_callbacks.route_, perFilterConfig(filter_name)).WillByDefault(Return(&two)); - ON_CALL(filter_callbacks.route_->route_entry_, perFilterConfig(filter_name)) - .WillByDefault(Invoke( - [&](const std::string& name) { return filter_callbacks.route_->perFilterConfig(name); })); - EXPECT_EQ(&two, Utility::resolveMostSpecificPerFilterConfig( - filter_name, filter_callbacks.route())); - - // Testing per-route entry typed filter config - ON_CALL(filter_callbacks.route_->route_entry_, perFilterConfig(filter_name)) - .WillByDefault(Return(&three)); - EXPECT_EQ(&three, Utility::resolveMostSpecificPerFilterConfig( - filter_name, filter_callbacks.route())); + ON_CALL(*filter_callbacks.route_, mostSpecificPerFilterConfig(filter_name)) + .WillByDefault(Return(&config)); + EXPECT_EQ(&config, Utility::resolveMostSpecificPerFilterConfig( + filter_name, filter_callbacks.route())); // Cover the case of no route entry ON_CALL(*filter_callbacks.route_, routeEntry()).WillByDefault(Return(nullptr)); - EXPECT_EQ(nullptr, Utility::resolveMostSpecificPerFilterConfig( + ON_CALL(*filter_callbacks.route_, mostSpecificPerFilterConfig(filter_name)) + .WillByDefault(Return(&config)); + EXPECT_EQ(&config, Utility::resolveMostSpecificPerFilterConfig( filter_name, filter_callbacks.route())); } -// Verify that traversePerFilterConfigGeneric traverses in the order of specificity. -TEST(HttpUtility, TraversePerFilterConfigIteratesInOrder) { - const std::string filter_name = "envoy.filter"; - NiceMock filter_callbacks; - - // Create configs to test; to ease of testing instead of using real objects - // we will use pointers that are actually indexes. 
- const std::vector nullconfigs(5); - size_t num_configs = 1; - ON_CALL(filter_callbacks.route_->route_entry_.virtual_host_, perFilterConfig(filter_name)) - .WillByDefault(Return(&nullconfigs[num_configs])); - num_configs++; - ON_CALL(*filter_callbacks.route_, perFilterConfig(filter_name)) - .WillByDefault(Return(&nullconfigs[num_configs])); - num_configs++; - ON_CALL(filter_callbacks.route_->route_entry_, perFilterConfig(filter_name)) - .WillByDefault(Return(&nullconfigs[num_configs])); - - // a vector to save which configs are visited by the traversePerFilterConfigGeneric - std::vector visited_configs(num_configs, 0); - - // Iterate; save the retrieved config index in the iteration index in visited_configs. - size_t index = 0; - Utility::traversePerFilterConfigGeneric(filter_name, filter_callbacks.route(), - [&](const Router::RouteSpecificFilterConfig& cfg) { - int cfg_index = &cfg - nullconfigs.data(); - visited_configs[index] = cfg_index - 1; - index++; - }); - - // Make sure all methods were called, and in order. - for (size_t i = 0; i < visited_configs.size(); i++) { - EXPECT_EQ(i, visited_configs[i]); - } -} - -// Verify that traversePerFilterConfig works and we get back the original type. -TEST(HttpUtility, TraversePerFilterConfigTyped) { - TestConfig testConfig; - - const std::string filter_name = "envoy.filter"; - NiceMock filter_callbacks; - - // make the file callbacks return our test config - ON_CALL(*filter_callbacks.route_, perFilterConfig(filter_name)) - .WillByDefault(Return(&testConfig)); - - // iterate the configs - size_t index = 0; - Utility::traversePerFilterConfig(filter_name, filter_callbacks.route(), - [&](const TestConfig&) { index++; }); - - // make sure that the callback was called (which means that the dynamic_cast worked.) - EXPECT_EQ(1, index); -} - // Verify that merging works as expected and we get back the merged result. 
TEST(HttpUtility, GetMergedPerFilterConfig) { TestConfig baseTestConfig, routeTestConfig; @@ -993,11 +959,12 @@ TEST(HttpUtility, GetMergedPerFilterConfig) { const std::string filter_name = "envoy.filter"; NiceMock filter_callbacks; - // make the file callbacks return our test config - ON_CALL(filter_callbacks.route_->route_entry_.virtual_host_, perFilterConfig(filter_name)) - .WillByDefault(Return(&baseTestConfig)); - ON_CALL(*filter_callbacks.route_, perFilterConfig(filter_name)) - .WillByDefault(Return(&routeTestConfig)); + EXPECT_CALL(*filter_callbacks.route_, traversePerFilterConfig(filter_name, _)) + .WillOnce(Invoke([&](const std::string&, + std::function cb) { + cb(baseTestConfig); + cb(routeTestConfig); + })); // merge the configs auto merged_cfg = Utility::getMergedPerFilterConfig( diff --git a/test/common/json/json_fuzz_test.cc b/test/common/json/json_fuzz_test.cc index bcc062f3c460b..7e2f40b89c6f5 100644 --- a/test/common/json/json_fuzz_test.cc +++ b/test/common/json/json_fuzz_test.cc @@ -39,10 +39,10 @@ DEFINE_FUZZER(const uint8_t* buf, size_t len) { // round-trip. 
std::string yaml = MessageUtil::getYamlStringFromMessage(message); ProtobufWkt::Struct yaml_message; - MessageUtil::loadFromYaml(yaml, yaml_message); + TestUtility::loadFromYaml(yaml, yaml_message); ProtobufWkt::Struct message3; - MessageUtil::loadFromYaml(MessageUtil::getYamlStringFromMessage(yaml_message), message3); + TestUtility::loadFromYaml(MessageUtil::getYamlStringFromMessage(yaml_message), message3); FUZZ_ASSERT(TestUtility::protoEqual(yaml_message, message3)); } catch (const Envoy::EnvoyException& e) { ENVOY_LOG_MISC(debug, "Failed due to {}", e.what()); diff --git a/test/common/network/BUILD b/test/common/network/BUILD index ce5f097cd5f5e..568cf67d26bab 100644 --- a/test/common/network/BUILD +++ b/test/common/network/BUILD @@ -97,6 +97,17 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "happy_eyeballs_connection_impl_test", + srcs = ["happy_eyeballs_connection_impl_test.cc"], + deps = [ + "//source/common/network:happy_eyeballs_connection_impl_lib", + "//source/common/network:socket_option_lib", + "//test/mocks/event:event_mocks", + "//test/mocks/network:network_mocks", + ], +) + envoy_cc_test( name = "apple_dns_impl_test", srcs = select({ @@ -311,7 +322,7 @@ envoy_cc_test( "//test/test_common:network_utility_lib", "//test/test_common:threadsafe_singleton_injector_lib", "//test/test_common:utility_lib", - "@com_googlesource_quiche//:quic_test_tools_mock_syscall_wrapper_lib", + "@com_github_google_quiche//:quic_test_tools_mock_syscall_wrapper_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) diff --git a/test/common/network/apple_dns_impl_test.cc b/test/common/network/apple_dns_impl_test.cc index 552428d83abe9..feb6b0ac89dda 100644 --- a/test/common/network/apple_dns_impl_test.cc +++ b/test/common/network/apple_dns_impl_test.cc @@ -271,13 +271,17 @@ class AppleDnsImplFakeApiTest : public testing::Test { EXPECT_CALL(dns_service_, dnsServiceGetAddrInfo(_, _, _, _, _, _, _)) .WillOnce(Return(error_code)); - EXPECT_EQ(nullptr, resolver_->resolve( - 
"foo.com", Network::DnsLookupFamily::Auto, - [](DnsResolver::ResolutionStatus, std::list&&) -> void { - // This callback should never be executed. - FAIL(); - })); - + bool callback_called = false; + EXPECT_EQ(nullptr, resolver_->resolve("foo.com", Network::DnsLookupFamily::Auto, + [&](DnsResolver::ResolutionStatus status, + std::list&& responses) -> void { + EXPECT_EQ(DnsResolver::ResolutionStatus::Failure, + status); + EXPECT_TRUE(responses.empty()); + callback_called = true; + })); + + EXPECT_TRUE(callback_called); checkErrorStat(error_code); } diff --git a/test/common/network/connection_impl_test.cc b/test/common/network/connection_impl_test.cc index 39262ba7164e8..a7169ee003047 100644 --- a/test/common/network/connection_impl_test.cc +++ b/test/common/network/connection_impl_test.cc @@ -47,6 +47,7 @@ using testing::Optional; using testing::Return; using testing::SaveArg; using testing::Sequence; +using testing::StartsWith; using testing::StrictMock; namespace Envoy { @@ -138,13 +139,13 @@ class ConnectionImplTest : public testing::TestWithParam { Network::Test::getCanonicalLoopbackAddress(GetParam())); listener_ = dispatcher_->createListener(socket_, listener_callbacks_, true); client_connection_ = std::make_unique( - *dispatcher_, socket_->addressProvider().localAddress(), source_address_, + *dispatcher_, socket_->connectionInfoProvider().localAddress(), source_address_, Network::Test::createRawBufferSocket(), socket_options_); client_connection_->addConnectionCallbacks(client_callbacks_); EXPECT_EQ(nullptr, client_connection_->ssl()); const Network::ClientConnection& const_connection = *client_connection_; EXPECT_EQ(nullptr, const_connection.ssl()); - EXPECT_FALSE(client_connection_->addressProvider().localAddressRestored()); + EXPECT_FALSE(client_connection_->connectionInfoProvider().localAddressRestored()); } void connect() { @@ -377,7 +378,7 @@ TEST_P(ConnectionImplTest, ImmediateConnectError) { Address::InstanceConstSharedPtr broadcast_address; socket_ = 
std::make_shared( Network::Test::getCanonicalLoopbackAddress(GetParam())); - if (socket_->addressProvider().localAddress()->ip()->version() == Address::IpVersion::v4) { + if (socket_->connectionInfoProvider().localAddress()->ip()->version() == Address::IpVersion::v4) { broadcast_address = std::make_shared("224.0.0.1", 0); } else { broadcast_address = std::make_shared("ff02::1", 0); @@ -392,6 +393,8 @@ TEST_P(ConnectionImplTest, ImmediateConnectError) { EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::RemoteClose)) .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); })); dispatcher_->run(Event::Dispatcher::RunType::Block); + + EXPECT_THAT(client_connection_->transportFailureReason(), StartsWith("immediate connect error")); } TEST_P(ConnectionImplTest, SetServerTransportSocketTimeout) { @@ -409,7 +412,9 @@ TEST_P(ConnectionImplTest, SetServerTransportSocketTimeout) { std::move(mocks.transport_socket_), stream_info_, true); EXPECT_CALL(*mock_timer, enableTimer(std::chrono::milliseconds(3 * 1000), _)); - server_connection->setTransportSocketConnectTimeout(std::chrono::seconds(3)); + Stats::MockCounter timeout_counter; + EXPECT_CALL(timeout_counter, inc()); + server_connection->setTransportSocketConnectTimeout(std::chrono::seconds(3), timeout_counter); EXPECT_CALL(*transport_socket, closeSocket(ConnectionEvent::LocalClose)); mock_timer->invokeCallback(); EXPECT_THAT(stream_info_.connectionTerminationDetails(), @@ -429,7 +434,9 @@ TEST_P(ConnectionImplTest, SetServerTransportSocketTimeoutAfterConnect) { transport_socket->callbacks_->raiseEvent(ConnectionEvent::Connected); // This should be a no-op. No timer should be created. 
EXPECT_CALL(*mocks.dispatcher_, createTimer_(_)).Times(0); - server_connection->setTransportSocketConnectTimeout(std::chrono::seconds(3)); + Stats::MockCounter timeout_counter; + EXPECT_CALL(timeout_counter, inc()).Times(0); + server_connection->setTransportSocketConnectTimeout(std::chrono::seconds(3), timeout_counter); server_connection->close(ConnectionCloseType::NoFlush); } @@ -452,7 +459,9 @@ TEST_P(ConnectionImplTest, ServerTransportSocketTimeoutDisabledOnConnect) { mock_timer->timer_destroyed_ = &timer_destroyed; EXPECT_CALL(*mock_timer, enableTimer(std::chrono::milliseconds(3 * 1000), _)); - server_connection->setTransportSocketConnectTimeout(std::chrono::seconds(3)); + Stats::MockCounter timeout_counter; + EXPECT_CALL(timeout_counter, inc()).Times(0); + server_connection->setTransportSocketConnectTimeout(std::chrono::seconds(3), timeout_counter); transport_socket->callbacks_->raiseEvent(ConnectionEvent::Connected); EXPECT_TRUE(timer_destroyed); @@ -490,7 +499,7 @@ TEST_P(ConnectionImplTest, SocketOptions) { server_connection_->addReadFilter(read_filter_); upstream_connection_ = dispatcher_->createClientConnection( - socket_->addressProvider().localAddress(), source_address_, + socket_->connectionInfoProvider().localAddress(), source_address_, Network::Test::createRawBufferSocket(), server_connection_->socketOptions()); })); @@ -539,7 +548,7 @@ TEST_P(ConnectionImplTest, SocketOptionsFailureTest) { server_connection_->addReadFilter(read_filter_); upstream_connection_ = dispatcher_->createClientConnection( - socket_->addressProvider().localAddress(), source_address_, + socket_->connectionInfoProvider().localAddress(), source_address_, Network::Test::createRawBufferSocket(), server_connection_->socketOptions()); upstream_connection_->addConnectionCallbacks(upstream_callbacks_); })); @@ -1295,7 +1304,7 @@ TEST_P(ConnectionImplTest, BindTest) { setUpBasicConnection(); connect(); EXPECT_EQ(address_string, - 
server_connection_->addressProvider().remoteAddress()->ip()->addressAsString()); + server_connection_->connectionInfoProvider().remoteAddress()->ip()->addressAsString()); disconnect(true); } @@ -1314,7 +1323,7 @@ TEST_P(ConnectionImplTest, BindFromSocketTest) { auto option = std::make_shared>(); EXPECT_CALL(*option, setOption(_, Eq(envoy::config::core::v3::SocketOption::STATE_PREBIND))) .WillOnce(Invoke([&](Socket& socket, envoy::config::core::v3::SocketOption::SocketState) { - socket.addressProvider().setLocalAddress(new_source_address); + socket.connectionInfoProvider().setLocalAddress(new_source_address); return true; })); @@ -1323,10 +1332,11 @@ TEST_P(ConnectionImplTest, BindFromSocketTest) { setUpBasicConnection(); connect(); EXPECT_EQ(address_string, - server_connection_->addressProvider().remoteAddress()->ip()->addressAsString()); + server_connection_->connectionInfoProvider().remoteAddress()->ip()->addressAsString()); disconnect(true); } + TEST_P(ConnectionImplTest, BindFailureTest) { // Swap the constraints from BindTest to create an address family mismatch. 
if (GetParam() == Network::Address::IpVersion::v6) { @@ -1344,7 +1354,7 @@ TEST_P(ConnectionImplTest, BindFailureTest) { listener_ = dispatcher_->createListener(socket_, listener_callbacks_, true); client_connection_ = dispatcher_->createClientConnection( - socket_->addressProvider().localAddress(), source_address_, + socket_->connectionInfoProvider().localAddress(), source_address_, Network::Test::createRawBufferSocket(), nullptr); MockConnectionStats connection_stats; @@ -1353,6 +1363,7 @@ TEST_P(ConnectionImplTest, BindFailureTest) { EXPECT_CALL(connection_stats.bind_errors_, inc()); EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::LocalClose)); dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + EXPECT_THAT(client_connection_->transportFailureReason(), StartsWith("failed to bind to")); } // ReadOnCloseTest verifies that the read filter's onData function is invoked with available data @@ -1912,7 +1923,7 @@ TEST_P(ConnectionImplTest, NetworkSocketDumpsWithoutAllocatingMemory) { const auto contents = ostream.contents(); EXPECT_THAT(contents, HasSubstr("ListenSocketImpl")); EXPECT_THAT(contents, HasSubstr("transport_protocol_: ")); - EXPECT_THAT(contents, HasSubstr("SocketAddressSetterImpl")); + EXPECT_THAT(contents, HasSubstr("ConnectionInfoSetterImpl")); if (GetParam() == Network::Address::IpVersion::v4) { EXPECT_THAT( contents, @@ -2813,9 +2824,10 @@ class ReadBufferLimitTest : public ConnectionImplTest { Network::Test::getCanonicalLoopbackAddress(GetParam())); listener_ = dispatcher_->createListener(socket_, listener_callbacks_, true); - client_connection_ = dispatcher_->createClientConnection( - socket_->addressProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), - Network::Test::createRawBufferSocket(), nullptr); + client_connection_ = + dispatcher_->createClientConnection(socket_->connectionInfoProvider().localAddress(), + Network::Address::InstanceConstSharedPtr(), + Network::Test::createRawBufferSocket(), nullptr); 
client_connection_->addConnectionCallbacks(client_callbacks_); client_connection_->connect(); @@ -2915,6 +2927,7 @@ TEST_P(TcpClientConnectionImplTest, BadConnectConnRefused) { connection->connect(); connection->noDelay(true); dispatcher_->run(Event::Dispatcher::RunType::Block); + EXPECT_THAT(connection->transportFailureReason(), StartsWith("delayed connect error")); } class PipeClientConnectionImplTest : public testing::Test { diff --git a/test/common/network/dns_impl_test.cc b/test/common/network/dns_impl_test.cc index ff513d7128108..20251102575f8 100644 --- a/test/common/network/dns_impl_test.cc +++ b/test/common/network/dns_impl_test.cc @@ -35,7 +35,7 @@ #include #include #else -#include "nameser.h" +#include "ares_nameser.h" #endif using testing::_; @@ -441,11 +441,13 @@ class DnsImplTest : public testing::TestWithParam { Network::Test::getCanonicalLoopbackAddress(GetParam())); listener_ = dispatcher_->createListener(socket_, *server_, true); updateDnsResolverOptions(); + // Create a resolver options on stack here to emulate what actually happens in envoy bootstrap. + envoy::config::core::v3::DnsResolverOptions dns_resolver_options = dns_resolver_options_; if (setResolverInConstructor()) { - resolver_ = dispatcher_->createDnsResolver({socket_->addressProvider().localAddress()}, - dns_resolver_options_); + resolver_ = dispatcher_->createDnsResolver({socket_->connectionInfoProvider().localAddress()}, + dns_resolver_options); } else { - resolver_ = dispatcher_->createDnsResolver({}, dns_resolver_options_); + resolver_ = dispatcher_->createDnsResolver({}, dns_resolver_options); } // Point c-ares at the listener with no search domains and TCP-only. 
@@ -453,8 +455,8 @@ class DnsImplTest : public testing::TestWithParam { if (tcpOnly()) { peer_->resetChannelTcpOnly(zeroTimeout()); } - ares_set_servers_ports_csv(peer_->channel(), - socket_->addressProvider().localAddress()->asString().c_str()); + ares_set_servers_ports_csv( + peer_->channel(), socket_->connectionInfoProvider().localAddress()->asString().c_str()); } void TearDown() override { @@ -589,7 +591,7 @@ TEST_P(DnsImplTest, DestructCallback) { // a subsequent result to call ares_destroy. peer_->resetChannelTcpOnly(zeroTimeout()); ares_set_servers_ports_csv(peer_->channel(), - socket_->addressProvider().localAddress()->asString().c_str()); + socket_->connectionInfoProvider().localAddress()->asString().c_str()); dispatcher_->run(Event::Dispatcher::RunType::Block); } @@ -717,7 +719,7 @@ TEST_P(DnsImplTest, DestroyChannelOnRefused) { peer_->resetChannelTcpOnly(zeroTimeout()); } ares_set_servers_ports_csv(peer_->channel(), - socket_->addressProvider().localAddress()->asString().c_str()); + socket_->connectionInfoProvider().localAddress()->asString().c_str()); EXPECT_NE(nullptr, resolveWithExpectations("some.good.domain", DnsLookupFamily::Auto, DnsResolver::ResolutionStatus::Success, diff --git a/test/common/network/filter_matcher_test.cc b/test/common/network/filter_matcher_test.cc index 30208c83dc79b..dbb613f082a6d 100644 --- a/test/common/network/filter_matcher_test.cc +++ b/test/common/network/filter_matcher_test.cc @@ -22,7 +22,7 @@ class ListenerFilterMatcherTest : public testing::Test { auto handle = std::make_unique(); handle->socket_ = std::make_unique(); handle->callback_ = std::make_unique(); - handle->socket_->address_provider_->setLocalAddress( + handle->socket_->connection_info_provider_->setLocalAddress( std::make_shared("127.0.0.1", port)); EXPECT_CALL(*(handle->callback_), socket()).WillRepeatedly(ReturnRef(*(handle->socket_))); return handle; diff --git a/test/common/network/happy_eyeballs_connection_impl_test.cc 
b/test/common/network/happy_eyeballs_connection_impl_test.cc new file mode 100644 index 0000000000000..743fa110a606d --- /dev/null +++ b/test/common/network/happy_eyeballs_connection_impl_test.cc @@ -0,0 +1,1050 @@ +#include "source/common/network/address_impl.h" +#include "source/common/network/happy_eyeballs_connection_impl.h" +#include "source/common/network/transport_socket_options_impl.h" + +#include "test/mocks/event/mocks.h" +#include "test/mocks/network/connection.h" +#include "test/mocks/network/mocks.h" +#include "test/mocks/network/transport_socket.h" +#include "test/mocks/stream_info/mocks.h" + +using testing::Return; +using testing::ReturnRef; +using testing::StrictMock; + +namespace Envoy { +namespace Network { + +class HappyEyeballsConnectionImplTest : public testing::Test { +public: + HappyEyeballsConnectionImplTest() + : failover_timer_(new testing::StrictMock(&dispatcher_)), + transport_socket_options_(std::make_shared()), + options_(std::make_shared()), + address_list_({std::make_shared("127.0.0.1"), + std::make_shared("127.0.0.2"), + std::make_shared("127.0.0.3")}) { + EXPECT_CALL(transport_socket_factory_, createTransportSocket(_)); + EXPECT_CALL(dispatcher_, createClientConnection_(address_list_[0], _, _, _)) + .WillOnce(testing::InvokeWithoutArgs( + this, &HappyEyeballsConnectionImplTest::createNextConnection)); + + next_connections_.push_back(std::make_unique>()); + impl_ = std::make_unique( + dispatcher_, address_list_, Address::InstanceConstSharedPtr(), transport_socket_factory_, + transport_socket_options_, options_); + } + + // Called by the dispatcher to return a MockClientConnection. In order to allow expectations to + // be set on this connection, the object must exist. So instead of allocating a new + // MockClientConnection in this method, it instead pops the first entry from + // next_connections_ and returns that. 
It also saves a pointer to that connection into + // created_connections_ so that it can be interacted with after it has been returned. + MockClientConnection* createNextConnection() { + created_connections_.push_back(next_connections_.front().release()); + next_connections_.pop_front(); + EXPECT_CALL(*created_connections_.back(), addConnectionCallbacks(_)) + .WillOnce( + Invoke([&](ConnectionCallbacks& cb) -> void { connection_callbacks_.push_back(&cb); })); + return created_connections_.back(); + } + + // Calls connect() on the impl and verifies that the timer is started. + void startConnect() { + EXPECT_CALL(*failover_timer_, enableTimer(std::chrono::milliseconds(300), nullptr)); + EXPECT_CALL(*failover_timer_, enabled()).WillRepeatedly(Return(true)); + EXPECT_CALL(*created_connections_[0], connect()); + impl_->connect(); + } + + // Connects the first (and only attempt). + void connectFirstAttempt() { + startConnect(); + + EXPECT_CALL(*failover_timer_, disableTimer()); + EXPECT_CALL(*created_connections_[0], removeConnectionCallbacks(_)); + connection_callbacks_[0]->onEvent(ConnectionEvent::Connected); + } + + // Fires the failover timer and creates the next connection. + void timeOutAndStartNextAttempt() { + EXPECT_CALL(transport_socket_factory_, createTransportSocket(_)); + EXPECT_CALL(dispatcher_, createClientConnection_(address_list_[1], _, _, _)) + .WillOnce(testing::InvokeWithoutArgs( + this, &HappyEyeballsConnectionImplTest::createNextConnection)); + EXPECT_CALL(*next_connections_.back(), connect()); + EXPECT_CALL(*failover_timer_, enableTimer(std::chrono::milliseconds(300), nullptr)); + failover_timer_->invokeCallback(); + } + + // Have the second connection attempt succeed which should disable the fallback timer, + // and close the first attempt. 
+ void connectSecondAttempt() { + ASSERT_EQ(2, created_connections_.size()); + EXPECT_CALL(*failover_timer_, disableTimer()); + EXPECT_CALL(*created_connections_[1], removeConnectionCallbacks(_)); + EXPECT_CALL(*created_connections_[0], removeConnectionCallbacks(_)); + EXPECT_CALL(*created_connections_[0], close(ConnectionCloseType::NoFlush)); + connection_callbacks_[1]->onEvent(ConnectionEvent::Connected); + } + +protected: + Event::MockDispatcher dispatcher_; + testing::StrictMock* failover_timer_; + MockTransportSocketFactory transport_socket_factory_; + TransportSocketOptionsConstSharedPtr transport_socket_options_; + const ConnectionSocket::OptionsSharedPtr options_; + const std::vector address_list_; + std::vector*> created_connections_; + std::vector connection_callbacks_; + std::deque>> next_connections_; + std::unique_ptr impl_; +}; + +TEST_F(HappyEyeballsConnectionImplTest, Connect) { startConnect(); } + +TEST_F(HappyEyeballsConnectionImplTest, ConnectTimeout) { + startConnect(); + + next_connections_.push_back(std::make_unique>()); + timeOutAndStartNextAttempt(); + + // Let the second attempt timeout to start the third and final attempt. + next_connections_.push_back(std::make_unique>()); + EXPECT_CALL(transport_socket_factory_, createTransportSocket(_)); + EXPECT_CALL(dispatcher_, createClientConnection_(address_list_[2], _, _, _)) + .WillOnce( + testing::InvokeWithoutArgs(this, &HappyEyeballsConnectionImplTest::createNextConnection)); + EXPECT_CALL(*next_connections_.back(), connect()); + // Since there are no more addresses to connect to, the fallback timer will not + // be rescheduled. + failover_timer_->invokeCallback(); +} + +TEST_F(HappyEyeballsConnectionImplTest, ConnectFailed) { + startConnect(); + + // When the first connection attempt fails, the next attempt will be immediately + // started and the timer will be armed for the third attempt. 
+ next_connections_.push_back(std::make_unique>()); + EXPECT_CALL(transport_socket_factory_, createTransportSocket(_)); + EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, _)) + .WillOnce( + testing::InvokeWithoutArgs(this, &HappyEyeballsConnectionImplTest::createNextConnection)); + EXPECT_CALL(*next_connections_.back(), connect()); + EXPECT_CALL(*created_connections_[0], removeConnectionCallbacks(_)); + EXPECT_CALL(*created_connections_[0], close(ConnectionCloseType::NoFlush)); + EXPECT_CALL(*failover_timer_, disableTimer()); + EXPECT_CALL(*failover_timer_, enableTimer(std::chrono::milliseconds(300), nullptr)); + connection_callbacks_[0]->onEvent(ConnectionEvent::RemoteClose); +} + +TEST_F(HappyEyeballsConnectionImplTest, ConnectFirstSuccess) { + startConnect(); + + EXPECT_CALL(*failover_timer_, disableTimer()); + EXPECT_CALL(*created_connections_[0], removeConnectionCallbacks(_)); + connection_callbacks_[0]->onEvent(ConnectionEvent::Connected); +} + +TEST_F(HappyEyeballsConnectionImplTest, ConnectTimeoutThenFirstSuccess) { + startConnect(); + + next_connections_.push_back(std::make_unique>()); + timeOutAndStartNextAttempt(); + + // Connect the first attempt and verify that the second is closed. + EXPECT_CALL(*failover_timer_, disableTimer()); + EXPECT_CALL(*created_connections_[0], removeConnectionCallbacks(_)); + EXPECT_CALL(*created_connections_[1], removeConnectionCallbacks(_)); + EXPECT_CALL(*created_connections_[1], close(ConnectionCloseType::NoFlush)); + connection_callbacks_[0]->onEvent(ConnectionEvent::Connected); + + // Verify that calls are delegated to the right connection. + EXPECT_CALL(*created_connections_[0], connecting()).WillOnce(Return(false)); + EXPECT_FALSE(impl_->connecting()); +} + +TEST_F(HappyEyeballsConnectionImplTest, ConnectTimeoutThenSecondSuccess) { + startConnect(); + + next_connections_.push_back(std::make_unique>()); + timeOutAndStartNextAttempt(); + + // Connect the second attempt and verify that the first is closed. 
+ EXPECT_CALL(*failover_timer_, disableTimer()); + EXPECT_CALL(*created_connections_[1], removeConnectionCallbacks(_)); + EXPECT_CALL(*created_connections_[0], removeConnectionCallbacks(_)); + EXPECT_CALL(*created_connections_[0], close(ConnectionCloseType::NoFlush)); + connection_callbacks_[1]->onEvent(ConnectionEvent::Connected); + + // Verify that calls are delegated to the right connection. + EXPECT_CALL(*created_connections_[1], connecting()).WillOnce(Return(false)); + EXPECT_FALSE(impl_->connecting()); +} + +TEST_F(HappyEyeballsConnectionImplTest, ConnectTimeoutThenSecondFailsAndFirstSucceeds) { + startConnect(); + + next_connections_.push_back(std::make_unique>()); + timeOutAndStartNextAttempt(); + + // When the second attempt fails, the third and final attempt will be started. + next_connections_.push_back(std::make_unique>()); + EXPECT_CALL(transport_socket_factory_, createTransportSocket(_)); + EXPECT_CALL(dispatcher_, createClientConnection_(address_list_[2], _, _, _)) + .WillOnce( + testing::InvokeWithoutArgs(this, &HappyEyeballsConnectionImplTest::createNextConnection)); + EXPECT_CALL(*next_connections_.back(), connect()); + // Since there are no more address to connect to, the fallback timer will not + // be rescheduled. 
+ EXPECT_CALL(*failover_timer_, disableTimer()); + ASSERT_EQ(2, created_connections_.size()); + + EXPECT_CALL(*created_connections_[1], removeConnectionCallbacks(_)); + EXPECT_CALL(*created_connections_[1], close(ConnectionCloseType::NoFlush)); + connection_callbacks_[1]->onEvent(ConnectionEvent::RemoteClose); + + EXPECT_CALL(*created_connections_[0], removeConnectionCallbacks(_)); + EXPECT_CALL(*created_connections_[0], close(ConnectionCloseType::NoFlush)); + connection_callbacks_[0]->onEvent(ConnectionEvent::RemoteClose); +} + +TEST_F(HappyEyeballsConnectionImplTest, ConnectThenAllTimeoutAndFail) { + startConnect(); + + next_connections_.push_back(std::make_unique>()); + timeOutAndStartNextAttempt(); + + // After the second timeout the third and final attempt will be started. + next_connections_.push_back(std::make_unique>()); + EXPECT_CALL(transport_socket_factory_, createTransportSocket(_)); + EXPECT_CALL(dispatcher_, createClientConnection_(address_list_[2], _, _, _)) + .WillOnce( + testing::InvokeWithoutArgs(this, &HappyEyeballsConnectionImplTest::createNextConnection)); + EXPECT_CALL(*next_connections_.back(), connect()); + ASSERT_EQ(2, created_connections_.size()); + failover_timer_->invokeCallback(); + + EXPECT_CALL(*created_connections_[1], removeConnectionCallbacks(_)); + EXPECT_CALL(*created_connections_[1], close(ConnectionCloseType::NoFlush)); + connection_callbacks_[1]->onEvent(ConnectionEvent::RemoteClose); + + EXPECT_CALL(*created_connections_[0], removeConnectionCallbacks(_)); + EXPECT_CALL(*created_connections_[0], close(ConnectionCloseType::NoFlush)); + connection_callbacks_[0]->onEvent(ConnectionEvent::RemoteClose); + + EXPECT_CALL(*created_connections_[2], removeConnectionCallbacks(_)); + EXPECT_CALL(*failover_timer_, disableTimer()); + connection_callbacks_[2]->onEvent(ConnectionEvent::RemoteClose); +} + +TEST_F(HappyEyeballsConnectionImplTest, Id) { + uint64_t id = ConnectionImpl::nextGlobalIdForTest() - 1; + EXPECT_EQ(id, impl_->id()); + + 
startConnect(); + + EXPECT_EQ(id, impl_->id()); +} + +TEST_F(HappyEyeballsConnectionImplTest, HashKey) { + uint64_t id = ConnectionImpl::nextGlobalIdForTest() - 1; + + startConnect(); + + std::vector hash_key = {'A', 'B', 'C'}; + uint8_t* id_array = reinterpret_cast(&id); + impl_->hashKey(hash_key); + EXPECT_EQ(3 + sizeof(id), hash_key.size()); + EXPECT_EQ('A', hash_key[0]); + EXPECT_EQ('B', hash_key[1]); + EXPECT_EQ('C', hash_key[2]); + for (size_t i = 0; i < sizeof(id); ++i) { + EXPECT_EQ(id_array[i], hash_key[i + 3]); + } +} + +TEST_F(HappyEyeballsConnectionImplTest, NoDelay) { + EXPECT_CALL(*created_connections_[0], noDelay(true)); + impl_->noDelay(true); + + startConnect(); + + next_connections_.push_back(std::make_unique>()); + // noDelay() should be applied to the newly created connection. + EXPECT_CALL(*next_connections_.back(), noDelay(true)); + timeOutAndStartNextAttempt(); + + connectSecondAttempt(); + + // Verify that noDelay calls are delegated to the remaining connection. + EXPECT_CALL(*created_connections_[1], noDelay(false)); + impl_->noDelay(false); +} + +TEST_F(HappyEyeballsConnectionImplTest, DetectEarlyCloseWhenReadDisabled) { + EXPECT_CALL(*created_connections_[0], detectEarlyCloseWhenReadDisabled(true)); + impl_->detectEarlyCloseWhenReadDisabled(true); + + startConnect(); + + next_connections_.push_back(std::make_unique>()); + // detectEarlyCloseWhenReadDisabled() should be applied to the newly created connection. + EXPECT_CALL(*next_connections_.back(), detectEarlyCloseWhenReadDisabled(true)); + timeOutAndStartNextAttempt(); + + connectSecondAttempt(); + + // Verify that detectEarlyCloseWhenReadDisabled() calls are delegated to the remaining connection. 
+ EXPECT_CALL(*created_connections_[1], detectEarlyCloseWhenReadDisabled(false)); + impl_->detectEarlyCloseWhenReadDisabled(false); +} + +TEST_F(HappyEyeballsConnectionImplTest, SetDelayedCloseTimeout) { + startConnect(); + + EXPECT_CALL(*created_connections_[0], setDelayedCloseTimeout(std::chrono::milliseconds(5))); + impl_->setDelayedCloseTimeout(std::chrono::milliseconds(5)); + + next_connections_.push_back(std::make_unique>()); + // setDelayedCloseTimeout() should be applied to the newly created connection. + EXPECT_CALL(*next_connections_.back(), setDelayedCloseTimeout(std::chrono::milliseconds(5))); + timeOutAndStartNextAttempt(); + + connectSecondAttempt(); + + // Verify that setDelayedCloseTimeout() calls are delegated to the remaining connection. + EXPECT_CALL(*created_connections_[1], setDelayedCloseTimeout(std::chrono::milliseconds(10))); + impl_->setDelayedCloseTimeout(std::chrono::milliseconds(10)); +} + +TEST_F(HappyEyeballsConnectionImplTest, CloseDuringAttempt) { + startConnect(); + + next_connections_.push_back(std::make_unique>()); + timeOutAndStartNextAttempt(); + + EXPECT_CALL(*failover_timer_, disableTimer()); + EXPECT_CALL(*created_connections_[0], removeConnectionCallbacks(_)); + EXPECT_CALL(*created_connections_[1], removeConnectionCallbacks(_)); + EXPECT_CALL(*created_connections_[0], close(ConnectionCloseType::FlushWrite)); + EXPECT_CALL(*created_connections_[1], close(ConnectionCloseType::NoFlush)); + impl_->close(ConnectionCloseType::FlushWrite); +} + +TEST_F(HappyEyeballsConnectionImplTest, CloseDuringAttemptWithCallbacks) { + startConnect(); + + MockConnectionCallbacks callbacks; + // The filter will be captured by the impl and not passed to the connection until it is closed. 
+ impl_->addConnectionCallbacks(callbacks); + + next_connections_.push_back(std::make_unique>()); + timeOutAndStartNextAttempt(); + + EXPECT_CALL(*failover_timer_, disableTimer()); + EXPECT_CALL(*created_connections_[0], removeConnectionCallbacks(_)); + EXPECT_CALL(*created_connections_[1], removeConnectionCallbacks(_)); + EXPECT_CALL(*created_connections_[1], close(ConnectionCloseType::NoFlush)); + // addConnectionCallbacks() should be applied to the now final connection. + EXPECT_CALL(*created_connections_[0], addConnectionCallbacks(_)) + .WillOnce(Invoke([&](ConnectionCallbacks& c) -> void { EXPECT_EQ(&c, &callbacks); })); + EXPECT_CALL(*created_connections_[0], close(ConnectionCloseType::FlushWrite)); + impl_->close(ConnectionCloseType::FlushWrite); +} + +TEST_F(HappyEyeballsConnectionImplTest, CloseAfterAttemptComplete) { + connectFirstAttempt(); + + EXPECT_CALL(*created_connections_[0], close(ConnectionCloseType::FlushWrite)); + impl_->close(ConnectionCloseType::FlushWrite); +} + +TEST_F(HappyEyeballsConnectionImplTest, AddReadFilter) { + MockReadFilterCallbacks callbacks; + ReadFilterSharedPtr filter = std::make_shared(); + filter->initializeReadFilterCallbacks(callbacks); + // The filter will be captured by the impl and not passed to the connection until it completes. + impl_->addReadFilter(filter); + + startConnect(); + + next_connections_.push_back(std::make_unique>()); + timeOutAndStartNextAttempt(); + + // addReadFilter() should be applied to the now final connection. + EXPECT_CALL(*created_connections_[1], addReadFilter(filter)); + connectSecondAttempt(); + + ReadFilterSharedPtr filter2 = std::make_shared(); + filter2->initializeReadFilterCallbacks(callbacks); + // Verify that addReadFilter() calls are delegated to the remaining connection. 
+ EXPECT_CALL(*created_connections_[1], addReadFilter(filter2)); + impl_->addReadFilter(filter2); +} + +TEST_F(HappyEyeballsConnectionImplTest, RemoveReadFilter) { + startConnect(); + + connectFirstAttempt(); + + MockReadFilterCallbacks callbacks; + ReadFilterSharedPtr filter = std::make_shared(); + filter->initializeReadFilterCallbacks(callbacks); + // Verify that removeReadFilter() calls are delegated to the final connection. + EXPECT_CALL(*created_connections_[0], removeReadFilter(filter)); + impl_->removeReadFilter(filter); +} + +TEST_F(HappyEyeballsConnectionImplTest, RemoveReadFilterBeforeConnectFinished) { + startConnect(); + + MockReadFilterCallbacks callbacks; + ReadFilterSharedPtr filter = std::make_shared(); + filter->initializeReadFilterCallbacks(callbacks); + ReadFilterSharedPtr filter2 = std::make_shared(); + filter2->initializeReadFilterCallbacks(callbacks); + // The filters will be captured by the impl and not passed to the connection until it completes. + impl_->addReadFilter(filter); + impl_->addReadFilter(filter2); + + // The removal will be captured by the impl and not passed to the connection until it completes. + impl_->removeReadFilter(filter); + + next_connections_.push_back(std::make_unique>()); + timeOutAndStartNextAttempt(); + + // Verify that addReadFilter() calls are delegated to the remaining connection. + EXPECT_CALL(*created_connections_[1], addReadFilter(filter2)); + connectSecondAttempt(); +} + +TEST_F(HappyEyeballsConnectionImplTest, InitializeReadFilters) { + startConnect(); + + // No read filters have been added + EXPECT_FALSE(impl_->initializeReadFilters()); + + MockReadFilterCallbacks callbacks; + ReadFilterSharedPtr filter = std::make_shared(); + filter->initializeReadFilterCallbacks(callbacks); + impl_->addReadFilter(filter); + + // initializeReadFilters() will be captured by the impl and not passed to the connection until it + // completes. 
+ EXPECT_TRUE(impl_->initializeReadFilters()); + + next_connections_.push_back(std::make_unique>()); + timeOutAndStartNextAttempt(); + + // initializeReadFilters() should be applied to the now final connection. + EXPECT_CALL(*created_connections_[1], addReadFilter(_)); + EXPECT_CALL(*created_connections_[1], initializeReadFilters()).WillOnce(Return(true)); + connectSecondAttempt(); + + ReadFilterSharedPtr filter2 = std::make_shared(); + filter2->initializeReadFilterCallbacks(callbacks); + // Verify that addReadFilter() calls are delegated to the remaining connection. + EXPECT_CALL(*created_connections_[1], addReadFilter(filter2)); + impl_->addReadFilter(filter2); +} + +TEST_F(HappyEyeballsConnectionImplTest, InitializeReadFiltersAfterConnect) { + startConnect(); + + connectFirstAttempt(); + + // Verify that initializeReadFilters() calls are delegated to the remaining connection. + EXPECT_CALL(*created_connections_[0], initializeReadFilters()).WillOnce(Return(false)); + EXPECT_FALSE(impl_->initializeReadFilters()); +} + +TEST_F(HappyEyeballsConnectionImplTest, AddConnectionCallbacks) { + MockConnectionCallbacks callbacks; + // The filter will be captured by the impl and not passed to the connection until it completes. + impl_->addConnectionCallbacks(callbacks); + + startConnect(); + + next_connections_.push_back(std::make_unique>()); + timeOutAndStartNextAttempt(); + + // addConnectionCallbacks() should be applied to the now final connection. + EXPECT_CALL(*created_connections_[1], addConnectionCallbacks(_)) + .WillOnce(Invoke([&](ConnectionCallbacks& c) -> void { EXPECT_EQ(&c, &callbacks); })); + connectSecondAttempt(); + + MockConnectionCallbacks callbacks2; + // Verify that addConnectionCallbacks() calls are delegated to the remaining connection. 
+ EXPECT_CALL(*created_connections_[1], addConnectionCallbacks(_)) + .WillOnce(Invoke([&](ConnectionCallbacks& c) -> void { EXPECT_EQ(&c, &callbacks2); })); + impl_->addConnectionCallbacks(callbacks2); +} + +TEST_F(HappyEyeballsConnectionImplTest, RemoveConnectionCallbacks) { + MockConnectionCallbacks callbacks; + MockConnectionCallbacks callbacks2; + // The callbacks will be captured by the impl and not passed to the connection until it completes. + impl_->addConnectionCallbacks(callbacks); + impl_->addConnectionCallbacks(callbacks2); + + startConnect(); + + impl_->removeConnectionCallbacks(callbacks); + + // addConnectionCallbacks() should be applied to the now final connection. + EXPECT_CALL(*created_connections_[0], addConnectionCallbacks(_)) + .WillOnce(Invoke([&](ConnectionCallbacks& c) -> void { EXPECT_EQ(&c, &callbacks2); })); + connectFirstAttempt(); + + // Verify that removeConnectionCallbacks calls are delegated to the remaining connection. + EXPECT_CALL(*created_connections_[0], removeConnectionCallbacks(_)) + .WillOnce(Invoke([&](ConnectionCallbacks& c) -> void { EXPECT_EQ(&c, &callbacks2); })); + impl_->removeConnectionCallbacks(callbacks2); +} + +TEST_F(HappyEyeballsConnectionImplTest, WriteBeforeConnect) { + Buffer::OwnedImpl data("hello world"); + bool end_stream = false; + + impl_->write(data, end_stream); + + startConnect(); + + EXPECT_CALL(*failover_timer_, disableTimer()); + EXPECT_CALL(*created_connections_[0], removeConnectionCallbacks(_)); + // The call to write() will be replayed on the underlying connection. 
+ EXPECT_CALL(*created_connections_[0], write(_, _)) + .WillOnce(Invoke([](Buffer::Instance& data, bool end_stream) -> void { + EXPECT_EQ("hello world", data.toString()); + EXPECT_FALSE(end_stream); + ; + })); + connection_callbacks_[0]->onEvent(ConnectionEvent::Connected); +} + +TEST_F(HappyEyeballsConnectionImplTest, WriteTwiceBeforeConnect) { + Buffer::OwnedImpl data1("hello world"); + Buffer::OwnedImpl data2("goodbye"); + + impl_->write(data1, false); + impl_->write(data2, true); + + startConnect(); + + EXPECT_CALL(*failover_timer_, disableTimer()); + EXPECT_CALL(*created_connections_[0], removeConnectionCallbacks(_)); + // The call to write() will be replayed on the underlying connection. + EXPECT_CALL(*created_connections_[0], write(_, _)) + .WillOnce(Invoke([](Buffer::Instance& data, bool end_stream) -> void { + EXPECT_EQ("hello worldgoodbye", data.toString()); + EXPECT_TRUE(end_stream); + ; + })); + connection_callbacks_[0]->onEvent(ConnectionEvent::Connected); +} + +TEST_F(HappyEyeballsConnectionImplTest, Write) { + startConnect(); + + connectFirstAttempt(); + + Buffer::OwnedImpl data("hello world"); + // The call to write() will be replayed on the underlying connection. + EXPECT_CALL(*created_connections_[0], write(_, _)) + .WillOnce(Invoke([](Buffer::Instance& data, bool end_stream) -> void { + EXPECT_EQ("hello world", data.toString()); + EXPECT_TRUE(end_stream); + ; + })); + impl_->write(data, true); +} + +TEST_F(HappyEyeballsConnectionImplTest, SetBufferLimits) { + startConnect(); + + EXPECT_CALL(*created_connections_[0], setBufferLimits(42)); + impl_->setBufferLimits(42); + + next_connections_.push_back(std::make_unique>()); + // setBufferLimits() should be applied to the newly created connection. + EXPECT_CALL(*next_connections_.back(), setBufferLimits(42)); + timeOutAndStartNextAttempt(); + + connectSecondAttempt(); + + // Verify that removeConnectionCallbacks calls are delegated to the remaining connection. 
+ EXPECT_CALL(*created_connections_[1], setBufferLimits(420)); + impl_->setBufferLimits(420); +} + +TEST_F(HappyEyeballsConnectionImplTest, WriteBeforeLimit) { + startConnect(); + + Buffer::OwnedImpl data("hello world"); + size_t length = data.length(); + bool end_stream = false; + + impl_->write(data, end_stream); + EXPECT_FALSE(impl_->aboveHighWatermark()); + + EXPECT_CALL(*created_connections_[0], setBufferLimits(length - 1)); + impl_->setBufferLimits(length - 1); + EXPECT_TRUE(impl_->aboveHighWatermark()); + + // The call to write() will be replayed on the underlying connection. + EXPECT_CALL(*created_connections_[0], write(_, _)) + .WillOnce(Invoke([](Buffer::Instance& data, bool end_stream) -> void { + EXPECT_EQ("hello world", data.toString()); + EXPECT_FALSE(end_stream); + })); + EXPECT_CALL(*created_connections_[0], bufferLimit()).WillRepeatedly(Return(length - 1)); + connectFirstAttempt(); +} + +TEST_F(HappyEyeballsConnectionImplTest, WriteBeforeConnectOverLimit) { + startConnect(); + + Buffer::OwnedImpl data("hello world"); + size_t length = data.length(); + bool end_stream = false; + + EXPECT_CALL(*created_connections_[0], setBufferLimits(length - 1)); + impl_->setBufferLimits(data.length() - 1); + + impl_->write(data, end_stream); + + EXPECT_CALL(*created_connections_[0], bufferLimit()).WillRepeatedly(Return(length - 1)); + // The call to write() will be replayed on the underlying connection. + EXPECT_CALL(*created_connections_[0], write(_, _)) + .WillOnce(Invoke([](Buffer::Instance& data, bool end_stream) -> void { + EXPECT_EQ("hello world", data.toString()); + EXPECT_FALSE(end_stream); + })); + connectFirstAttempt(); +} + +TEST_F(HappyEyeballsConnectionImplTest, WriteBeforeConnectOverLimitWithCallbacks) { + startConnect(); + + MockConnectionCallbacks callbacks; + // The filter will be captured by the impl and not passed to the connection until it is closed. 
+ impl_->addConnectionCallbacks(callbacks); + + Buffer::OwnedImpl data("hello world"); + size_t length = data.length(); + bool end_stream = false; + + EXPECT_CALL(*created_connections_[0], setBufferLimits(length - 1)); + impl_->setBufferLimits(data.length() - 1); + + EXPECT_CALL(callbacks, onAboveWriteBufferHighWatermark()); + impl_->write(data, end_stream); + + { + // The call to write() will be replayed on the underlying connection, but it will be done + // after the temporary callbacks are removed and before the final callbacks are added. + // This causes the underlying connection's high watermark notification to be swallowed. + testing::InSequence s; + EXPECT_CALL(*failover_timer_, disableTimer()); + EXPECT_CALL(*created_connections_[0], removeConnectionCallbacks(_)); + EXPECT_CALL(*created_connections_[0], bufferLimit()).WillRepeatedly(Return(length - 1)); + EXPECT_CALL(*created_connections_[0], write(_, _)); + EXPECT_CALL(*created_connections_[0], addConnectionCallbacks(_)); + connection_callbacks_[0]->onEvent(ConnectionEvent::Connected); + } +} + +TEST_F(HappyEyeballsConnectionImplTest, AboveHighWatermark) { + startConnect(); + + EXPECT_FALSE(impl_->aboveHighWatermark()); + + connectFirstAttempt(); + + // Delegates to the connection once connected. 
+ EXPECT_CALL(*created_connections_[0], aboveHighWatermark()).WillOnce(Return(true)); + EXPECT_TRUE(impl_->aboveHighWatermark()); +} + +TEST_F(HappyEyeballsConnectionImplTest, SetConnectionStats) { + StrictMock rx_total; + StrictMock rx_current; + StrictMock tx_total; + StrictMock tx_current; + StrictMock bind_errors; + StrictMock delayed_close_timeouts; + + Connection::ConnectionStats cs = {rx_total, rx_current, tx_total, + tx_current, &bind_errors, &delayed_close_timeouts}; + EXPECT_CALL(*created_connections_[0], setConnectionStats(_)) + .WillOnce(Invoke([&](const Connection::ConnectionStats& s) -> void { EXPECT_EQ(&s, &cs); })); + impl_->setConnectionStats(cs); + + startConnect(); + + next_connections_.push_back(std::make_unique>()); + // setConnectionStats() should be applied to the newly created connection. + EXPECT_CALL(*next_connections_.back(), setConnectionStats(_)) + .WillOnce(Invoke([&](const Connection::ConnectionStats& s) -> void { EXPECT_EQ(&s, &cs); })); + timeOutAndStartNextAttempt(); + + connectSecondAttempt(); + + // Verify that setConnectionStats calls are delegated to the remaining connection. 
+ Connection::ConnectionStats cs2 = {rx_total, rx_current, tx_total, + tx_current, &bind_errors, &delayed_close_timeouts}; + EXPECT_CALL(*created_connections_[1], setConnectionStats(_)) + .WillOnce(Invoke([&](const Connection::ConnectionStats& s) -> void { EXPECT_EQ(&s, &cs2); })); + impl_->setConnectionStats(cs2); +} + +TEST_F(HappyEyeballsConnectionImplTest, State) { + startConnect(); + + EXPECT_CALL(*created_connections_[0], state()).WillRepeatedly(Return(Connection::State::Open)); + EXPECT_EQ(Connection::State::Open, impl_->state()); + + connectFirstAttempt(); + + EXPECT_CALL(*created_connections_[0], state()).WillOnce(Return(Connection::State::Closing)); + EXPECT_EQ(Connection::State::Closing, impl_->state()); +} + +TEST_F(HappyEyeballsConnectionImplTest, Connecting) { + startConnect(); + + EXPECT_CALL(*created_connections_[0], connecting()).WillRepeatedly(Return(true)); + EXPECT_TRUE(impl_->connecting()); + + connectFirstAttempt(); + + EXPECT_CALL(*created_connections_[0], connecting()).WillRepeatedly(Return(false)); + EXPECT_FALSE(impl_->connecting()); +} + +TEST_F(HappyEyeballsConnectionImplTest, AddWriteFilter) { + MockWriteFilterCallbacks callbacks; + WriteFilterSharedPtr filter = std::make_shared(); + filter->initializeWriteFilterCallbacks(callbacks); + // The filter will be captured by the impl and not passed to the connection until it completes. + impl_->addWriteFilter(filter); + + startConnect(); + + next_connections_.push_back(std::make_unique>()); + timeOutAndStartNextAttempt(); + + // addWriteFilter() should be applied to the now final connection. + EXPECT_CALL(*created_connections_[1], addWriteFilter(filter)); + connectSecondAttempt(); + + WriteFilterSharedPtr filter2 = std::make_shared(); + filter2->initializeWriteFilterCallbacks(callbacks); + // Verify that addWriteFilter() calls are delegated to the remaining connection. 
+ EXPECT_CALL(*created_connections_[1], addWriteFilter(filter2)); + impl_->addWriteFilter(filter2); +} + +TEST_F(HappyEyeballsConnectionImplTest, AddWriteFilterAfterConnect) { + connectFirstAttempt(); + + MockWriteFilterCallbacks callbacks; + WriteFilterSharedPtr filter = std::make_shared(); + filter->initializeWriteFilterCallbacks(callbacks); + EXPECT_CALL(*created_connections_[0], addWriteFilter(filter)); + impl_->addWriteFilter(filter); +} + +TEST_F(HappyEyeballsConnectionImplTest, AddFilter) { + MockReadFilterCallbacks read_callbacks; + MockWriteFilterCallbacks write_callbacks; + FilterSharedPtr filter = std::make_shared(); + filter->initializeReadFilterCallbacks(read_callbacks); + filter->initializeWriteFilterCallbacks(write_callbacks); + // The filter will be captured by the impl and not passed to the connection until it completes. + impl_->addFilter(filter); + + startConnect(); + + next_connections_.push_back(std::make_unique>()); + timeOutAndStartNextAttempt(); + + // addFilter() should be applied to the now final connection. + EXPECT_CALL(*created_connections_[1], addFilter(filter)); + connectSecondAttempt(); + + FilterSharedPtr filter2 = std::make_shared(); + filter2->initializeReadFilterCallbacks(read_callbacks); + filter2->initializeWriteFilterCallbacks(write_callbacks); + // Verify that addFilter() calls are delegated to the remaining connection. 
+ EXPECT_CALL(*created_connections_[1], addFilter(filter2)); + impl_->addFilter(filter2); +} + +TEST_F(HappyEyeballsConnectionImplTest, AddFilterAfterConnect) { + connectFirstAttempt(); + + MockReadFilterCallbacks read_callbacks; + MockWriteFilterCallbacks write_callbacks; + FilterSharedPtr filter = std::make_shared(); + filter->initializeReadFilterCallbacks(read_callbacks); + filter->initializeWriteFilterCallbacks(write_callbacks); + EXPECT_CALL(*created_connections_[0], addFilter(filter)); + impl_->addFilter(filter); +} + +TEST_F(HappyEyeballsConnectionImplTest, AddBytesSentCallback) { + Connection::BytesSentCb callback = [](uint64_t) { return true; }; + // The filter will be captured by the impl and not passed to the connection until it completes. + impl_->addBytesSentCallback(callback); + + startConnect(); + + next_connections_.push_back(std::make_unique>()); + timeOutAndStartNextAttempt(); + + // addBytesSentCallback() should be applied to the now final connection. + EXPECT_CALL(*created_connections_[1], addBytesSentCallback(_)); + connectSecondAttempt(); + + Connection::BytesSentCb callback2 = [](uint64_t) { return true; }; + // Verify that addBytesSentCallback() calls are delegated to the remaining connection. + EXPECT_CALL(*created_connections_[1], addBytesSentCallback(_)); + impl_->addBytesSentCallback(callback2); +} + +TEST_F(HappyEyeballsConnectionImplTest, AddBytesSentCallbackAfterConnect) { + connectFirstAttempt(); + + Connection::BytesSentCb cb = [](uint64_t) { return true; }; + EXPECT_CALL(*created_connections_[0], addBytesSentCallback(_)); + impl_->addBytesSentCallback(cb); +} + +TEST_F(HappyEyeballsConnectionImplTest, EnableHalfClose) { + EXPECT_CALL(*created_connections_[0], enableHalfClose(true)); + impl_->enableHalfClose(true); + + startConnect(); + + next_connections_.push_back(std::make_unique>()); + // enableHalfClose() should be applied to the newly created connection. 
+ EXPECT_CALL(*next_connections_.back(), enableHalfClose(true)); + timeOutAndStartNextAttempt(); + + connectSecondAttempt(); + + // Verify that enableHalfClose calls are delegated to the remaining connection. + EXPECT_CALL(*created_connections_[1], enableHalfClose(false)); + impl_->enableHalfClose(false); +} + +TEST_F(HappyEyeballsConnectionImplTest, EnableHalfCloseAfterConnect) { + connectFirstAttempt(); + + EXPECT_CALL(*created_connections_[0], enableHalfClose(true)); + impl_->enableHalfClose(true); +} + +TEST_F(HappyEyeballsConnectionImplTest, IsHalfCloseEnabled) { + EXPECT_CALL(*created_connections_[0], isHalfCloseEnabled()).WillOnce(Return(false)); + EXPECT_FALSE(impl_->isHalfCloseEnabled()); + + EXPECT_CALL(*created_connections_[0], isHalfCloseEnabled()).WillOnce(Return(true)); + EXPECT_TRUE(impl_->isHalfCloseEnabled()); + + connectFirstAttempt(); +} + +TEST_F(HappyEyeballsConnectionImplTest, IsHalfCloseEnabledAfterConnect) { + connectFirstAttempt(); + + EXPECT_CALL(*created_connections_[0], isHalfCloseEnabled()).WillOnce(Return(true)); + EXPECT_TRUE(impl_->isHalfCloseEnabled()); +} + +TEST_F(HappyEyeballsConnectionImplTest, ReadDisable) { + // The disables will be captured by the impl and not passed to the connection until it completes. + impl_->readDisable(true); + + startConnect(); + + next_connections_.push_back(std::make_unique>()); + timeOutAndStartNextAttempt(); + + // The disables will be captured by the impl and not passed to the connection until it completes. + impl_->readDisable(true); + impl_->readDisable(true); + // Read disable count should now be 2. + impl_->readDisable(false); + + // readDisable() should be applied to the now final connection. + EXPECT_CALL(*created_connections_[1], readDisable(true)).Times(2); + connectSecondAttempt(); + + // Verify that addBytesSentCallback() calls are delegated to the remaining connection. 
+ EXPECT_CALL(*created_connections_[1], readDisable(false)); + impl_->readDisable(false); +} + +TEST_F(HappyEyeballsConnectionImplTest, ReadEnabled) { + EXPECT_TRUE(impl_->readEnabled()); + impl_->readDisable(true); // Disable count 1. + EXPECT_FALSE(impl_->readEnabled()); + + startConnect(); + + impl_->readDisable(true); // Disable count 2 + EXPECT_FALSE(impl_->readEnabled()); + impl_->readDisable(false); // Disable count 1 + EXPECT_FALSE(impl_->readEnabled()); + impl_->readDisable(false); // Disable count 0 + EXPECT_TRUE(impl_->readEnabled()); +} + +TEST_F(HappyEyeballsConnectionImplTest, ReadEnabledAfterConnect) { + connectFirstAttempt(); + + EXPECT_CALL(*created_connections_[0], readEnabled()).WillOnce(Return(true)); + EXPECT_TRUE(impl_->readEnabled()); +} + +TEST_F(HappyEyeballsConnectionImplTest, StartSecureTransport) { + EXPECT_CALL(*created_connections_[0], startSecureTransport()).WillOnce(Return(true)); + EXPECT_TRUE(impl_->startSecureTransport()); + + startConnect(); + + next_connections_.push_back(std::make_unique>()); + // startSecureTransport() should be applied to the newly created connection. + EXPECT_CALL(*next_connections_.back(), startSecureTransport()).WillOnce(Return(true)); + timeOutAndStartNextAttempt(); + + EXPECT_CALL(*created_connections_[0], startSecureTransport()).WillOnce(Return(false)); + EXPECT_CALL(*created_connections_[1], startSecureTransport()).WillOnce(Return(true)); + EXPECT_FALSE(impl_->startSecureTransport()); + + connectSecondAttempt(); + + // Verify that startSecureTransport calls are delegated to the remaining connection. 
+ EXPECT_CALL(*created_connections_[1], startSecureTransport()).WillOnce(Return(false)); + EXPECT_FALSE(impl_->startSecureTransport()); +} + +TEST_F(HappyEyeballsConnectionImplTest, StartSecureTransportAfterConnect) { + connectFirstAttempt(); + + EXPECT_CALL(*created_connections_[0], startSecureTransport()); + impl_->startSecureTransport(); +} + +// Tests for HappyEyeballsConnectionImpl methods which simply delegate to the first connection. + +TEST_F(HappyEyeballsConnectionImplTest, Dispatcher) { + connectFirstAttempt(); + + EXPECT_CALL(*created_connections_[0], dispatcher()).WillRepeatedly(ReturnRef(dispatcher_)); + EXPECT_EQ(&dispatcher_, &(impl_->dispatcher())); +} + +TEST_F(HappyEyeballsConnectionImplTest, BufferLimit) { + connectFirstAttempt(); + + EXPECT_CALL(*created_connections_[0], bufferLimit()).WillOnce(Return(42)); + EXPECT_EQ(42, impl_->bufferLimit()); +} + +TEST_F(HappyEyeballsConnectionImplTest, NextProtocol) { + connectFirstAttempt(); + + EXPECT_CALL(*created_connections_[0], nextProtocol()).WillOnce(Return("h3")); + EXPECT_EQ("h3", impl_->nextProtocol()); +} + +TEST_F(HappyEyeballsConnectionImplTest, AddressProvider) { + connectFirstAttempt(); + + const ConnectionInfoSetterImpl provider(std::make_shared(80), + std::make_shared(80)); + EXPECT_CALL(*created_connections_[0], connectionInfoProvider()).WillOnce(ReturnRef(provider)); + impl_->connectionInfoProvider(); +} + +TEST_F(HappyEyeballsConnectionImplTest, AddressProviderSharedPtr) { + connectFirstAttempt(); + + ConnectionInfoProviderSharedPtr provider = std::make_shared( + std::make_shared("127.0.0.2"), + std::make_shared("127.0.0.1")); + EXPECT_CALL(*created_connections_[0], connectionInfoProviderSharedPtr()) + .WillOnce(Return(provider)); + EXPECT_EQ(provider, impl_->connectionInfoProviderSharedPtr()); +} + +TEST_F(HappyEyeballsConnectionImplTest, UnixSocketPeerCredentials) { + connectFirstAttempt(); + + EXPECT_CALL(*created_connections_[0], unixSocketPeerCredentials()) + 
.WillOnce(Return(absl::optional())); + EXPECT_FALSE(impl_->unixSocketPeerCredentials().has_value()); +} + +TEST_F(HappyEyeballsConnectionImplTest, Ssl) { + connectFirstAttempt(); + + Ssl::ConnectionInfoConstSharedPtr ssl = nullptr; + EXPECT_CALL(*created_connections_[0], ssl()).WillOnce(Return(ssl)); + EXPECT_EQ(ssl, impl_->ssl()); +} + +TEST_F(HappyEyeballsConnectionImplTest, SocketOptions) { + connectFirstAttempt(); + + ConnectionSocket::OptionsSharedPtr options = nullptr; + EXPECT_CALL(*created_connections_[0], socketOptions()).WillOnce(ReturnRef(options)); + EXPECT_EQ(options, impl_->socketOptions()); +} + +TEST_F(HappyEyeballsConnectionImplTest, RequestedServerName) { + connectFirstAttempt(); + + EXPECT_CALL(*created_connections_[0], requestedServerName()).WillOnce(Return("name")); + EXPECT_EQ("name", impl_->requestedServerName()); +} + +TEST_F(HappyEyeballsConnectionImplTest, StreamInfo) { + connectFirstAttempt(); + + StreamInfo::MockStreamInfo info; + EXPECT_CALL(*created_connections_[0], streamInfo()).WillOnce(ReturnRef(info)); + EXPECT_EQ(&info, &impl_->streamInfo()); +} + +TEST_F(HappyEyeballsConnectionImplTest, ConstStreamInfo) { + connectFirstAttempt(); + + StreamInfo::MockStreamInfo info; + EXPECT_CALL(*created_connections_[0], streamInfo()).WillOnce(ReturnRef(info)); + const HappyEyeballsConnectionImpl* impl = impl_.get(); + EXPECT_EQ(&info, &impl->streamInfo()); +} + +TEST_F(HappyEyeballsConnectionImplTest, TransportFailureReason) { + connectFirstAttempt(); + + EXPECT_CALL(*created_connections_[0], transportFailureReason()).WillOnce(Return("reason")); + EXPECT_EQ("reason", impl_->transportFailureReason()); +} + +TEST_F(HappyEyeballsConnectionImplTest, LastRoundTripTime) { + connectFirstAttempt(); + + absl::optional rtt = std::chrono::milliseconds(5); + EXPECT_CALL(*created_connections_[0], lastRoundTripTime()).WillOnce(Return(rtt)); + EXPECT_EQ(rtt, impl_->lastRoundTripTime()); +} + +} // namespace Network +} // namespace Envoy diff --git 
a/test/common/network/listen_socket_impl_test.cc b/test/common/network/listen_socket_impl_test.cc index 5468031588d41..8f514e993084c 100644 --- a/test/common/network/listen_socket_impl_test.cc +++ b/test/common/network/listen_socket_impl_test.cc @@ -99,9 +99,9 @@ class ListenSocketImplTest : public testing::TestWithParam { EXPECT_EQ(0, socket1->listen(0).return_value_); } - EXPECT_EQ(addr->ip()->port(), socket1->addressProvider().localAddress()->ip()->port()); + EXPECT_EQ(addr->ip()->port(), socket1->connectionInfoProvider().localAddress()->ip()->port()); EXPECT_EQ(addr->ip()->addressAsString(), - socket1->addressProvider().localAddress()->ip()->addressAsString()); + socket1->connectionInfoProvider().localAddress()->ip()->addressAsString()); EXPECT_EQ(Type, socket1->socketType()); auto option2 = std::make_unique(); @@ -122,7 +122,7 @@ class ListenSocketImplTest : public testing::TestWithParam { Network::IoHandlePtr io_handle = std::make_unique(socket_result.return_value_); auto socket3 = createListenSocketPtr(std::move(io_handle), addr, nullptr); - EXPECT_EQ(socket3->addressProvider().localAddress()->asString(), addr->asString()); + EXPECT_EQ(socket3->connectionInfoProvider().localAddress()->asString(), addr->asString()); // Test successful. 
return; @@ -132,11 +132,11 @@ class ListenSocketImplTest : public testing::TestWithParam { void testBindPortZero() { auto loopback = Network::Test::getCanonicalLoopbackAddress(version_); auto socket = createListenSocketPtr(loopback, nullptr, true); - EXPECT_EQ(Address::Type::Ip, socket->addressProvider().localAddress()->type()); - EXPECT_EQ(version_, socket->addressProvider().localAddress()->ip()->version()); + EXPECT_EQ(Address::Type::Ip, socket->connectionInfoProvider().localAddress()->type()); + EXPECT_EQ(version_, socket->connectionInfoProvider().localAddress()->ip()->version()); EXPECT_EQ(loopback->ip()->addressAsString(), - socket->addressProvider().localAddress()->ip()->addressAsString()); - EXPECT_GT(socket->addressProvider().localAddress()->ip()->port(), 0U); + socket->connectionInfoProvider().localAddress()->ip()->addressAsString()); + EXPECT_GT(socket->connectionInfoProvider().localAddress()->ip()->port(), 0U); EXPECT_EQ(Type, socket->socketType()); } }; @@ -175,9 +175,9 @@ TEST_P(ListenSocketImplTestTcp, SetLocalAddress) { TestListenSocket socket(Utility::getIpv4AnyAddress()); - socket.addressProvider().setLocalAddress(address); + socket.connectionInfoProvider().setLocalAddress(address); - EXPECT_EQ(socket.addressProvider().localAddress(), address); + EXPECT_EQ(socket.connectionInfoProvider().localAddress(), address); } TEST_P(ListenSocketImplTestTcp, CheckIpVersionWithNullLocalAddress) { diff --git a/test/common/network/listener_impl_test.cc b/test/common/network/listener_impl_test.cc index 562b5cba4ec68..b1f0aac6462d6 100644 --- a/test/common/network/listener_impl_test.cc +++ b/test/common/network/listener_impl_test.cc @@ -38,7 +38,7 @@ static void errorCallbackTest(Address::IpVersion version) { Network::ListenerPtr listener = dispatcher->createListener(socket, listener_callbacks, true); Network::ClientConnectionPtr client_connection = dispatcher->createClientConnection( - socket->addressProvider().localAddress(), 
Network::Address::InstanceConstSharedPtr(), + socket->connectionInfoProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), Network::Test::createRawBufferSocket(), nullptr); client_connection->connect(); @@ -91,7 +91,7 @@ TEST_P(TcpListenerImplTest, UseActualDst) { listener_callbacks2, false); Network::ClientConnectionPtr client_connection = dispatcher_->createClientConnection( - socket->addressProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), + socket->connectionInfoProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), Network::Test::createRawBufferSocket(), nullptr); client_connection->connect(); @@ -103,8 +103,8 @@ TEST_P(TcpListenerImplTest, UseActualDst) { .WillOnce(Invoke([&](Network::ConnectionSocketPtr& accepted_socket) -> void { Network::ConnectionPtr conn = dispatcher_->createServerConnection( std::move(accepted_socket), Network::Test::createRawBufferSocket(), stream_info); - EXPECT_EQ(*conn->addressProvider().localAddress(), - *socket->addressProvider().localAddress()); + EXPECT_EQ(*conn->connectionInfoProvider().localAddress(), + *socket->connectionInfoProvider().localAddress()); client_connection->close(ConnectionCloseType::NoFlush); conn->close(ConnectionCloseType::NoFlush); dispatcher_->exit(); @@ -136,9 +136,10 @@ TEST_P(TcpListenerImplTest, GlobalConnectionLimitEnforcement) { auto initiate_connections = [&](const int count) { for (int i = 0; i < count; ++i) { - client_connections.emplace_back(dispatcher_->createClientConnection( - socket->addressProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), - Network::Test::createRawBufferSocket(), nullptr)); + client_connections.emplace_back( + dispatcher_->createClientConnection(socket->connectionInfoProvider().localAddress(), + Network::Address::InstanceConstSharedPtr(), + Network::Test::createRawBufferSocket(), nullptr)); client_connections.back()->connect(); } }; @@ -189,9 +190,9 @@ TEST_P(TcpListenerImplTest, 
WildcardListenerUseActualDst) { Network::TestTcpListenerImpl listener(dispatcherImpl(), random_generator, socket, listener_callbacks, true); - auto local_dst_address = - Network::Utility::getAddressWithPort(*Network::Test::getCanonicalLoopbackAddress(version_), - socket->addressProvider().localAddress()->ip()->port()); + auto local_dst_address = Network::Utility::getAddressWithPort( + *Network::Test::getCanonicalLoopbackAddress(version_), + socket->connectionInfoProvider().localAddress()->ip()->port()); Network::ClientConnectionPtr client_connection = dispatcher_->createClientConnection( local_dst_address, Network::Address::InstanceConstSharedPtr(), Network::Test::createRawBufferSocket(), nullptr); @@ -202,7 +203,7 @@ TEST_P(TcpListenerImplTest, WildcardListenerUseActualDst) { .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void { Network::ConnectionPtr conn = dispatcher_->createServerConnection( std::move(socket), Network::Test::createRawBufferSocket(), stream_info); - EXPECT_EQ(*conn->addressProvider().localAddress(), *local_dst_address); + EXPECT_EQ(*conn->connectionInfoProvider().localAddress(), *local_dst_address); client_connection->close(ConnectionCloseType::NoFlush); conn->close(ConnectionCloseType::NoFlush); dispatcher_->exit(); @@ -227,18 +228,18 @@ TEST_P(TcpListenerImplTest, WildcardListenerIpv4Compat) { Network::MockTcpListenerCallbacks listener_callbacks; Random::MockRandomGenerator random_generator; - ASSERT_TRUE(socket->addressProvider().localAddress()->ip()->isAnyAddress()); + ASSERT_TRUE(socket->connectionInfoProvider().localAddress()->ip()->isAnyAddress()); // Do not redirect since use_original_dst is false. 
Network::TestTcpListenerImpl listener(dispatcherImpl(), random_generator, socket, listener_callbacks, true); - auto listener_address = - Network::Utility::getAddressWithPort(*Network::Test::getCanonicalLoopbackAddress(version_), - socket->addressProvider().localAddress()->ip()->port()); - auto local_dst_address = - Network::Utility::getAddressWithPort(*Network::Utility::getCanonicalIpv4LoopbackAddress(), - socket->addressProvider().localAddress()->ip()->port()); + auto listener_address = Network::Utility::getAddressWithPort( + *Network::Test::getCanonicalLoopbackAddress(version_), + socket->connectionInfoProvider().localAddress()->ip()->port()); + auto local_dst_address = Network::Utility::getAddressWithPort( + *Network::Utility::getCanonicalIpv4LoopbackAddress(), + socket->connectionInfoProvider().localAddress()->ip()->port()); Network::ClientConnectionPtr client_connection = dispatcher_->createClientConnection( local_dst_address, Network::Address::InstanceConstSharedPtr(), Network::Test::createRawBufferSocket(), nullptr); @@ -249,11 +250,11 @@ TEST_P(TcpListenerImplTest, WildcardListenerIpv4Compat) { .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void { Network::ConnectionPtr conn = dispatcher_->createServerConnection( std::move(socket), Network::Test::createRawBufferSocket(), stream_info); - EXPECT_EQ(conn->addressProvider().localAddress()->ip()->version(), - conn->addressProvider().remoteAddress()->ip()->version()); - EXPECT_EQ(conn->addressProvider().localAddress()->asString(), + EXPECT_EQ(conn->connectionInfoProvider().localAddress()->ip()->version(), + conn->connectionInfoProvider().remoteAddress()->ip()->version()); + EXPECT_EQ(conn->connectionInfoProvider().localAddress()->asString(), local_dst_address->asString()); - EXPECT_EQ(*conn->addressProvider().localAddress(), *local_dst_address); + EXPECT_EQ(*conn->connectionInfoProvider().localAddress(), *local_dst_address); client_connection->close(ConnectionCloseType::NoFlush); 
conn->close(ConnectionCloseType::NoFlush); dispatcher_->exit(); @@ -277,7 +278,7 @@ TEST_P(TcpListenerImplTest, DisableAndEnableListener) { listener.disable(); ClientConnectionPtr client_connection = dispatcher_->createClientConnection( - socket->addressProvider().localAddress(), Address::InstanceConstSharedPtr(), + socket->connectionInfoProvider().localAddress(), Address::InstanceConstSharedPtr(), Network::Test::createRawBufferSocket(), nullptr); client_connection->addConnectionCallbacks(connection_callbacks); client_connection->connect(); @@ -325,7 +326,7 @@ TEST_P(TcpListenerImplTest, SetListenerRejectFractionZero) { EXPECT_CALL(listener_callbacks, onAccept_(_)).WillOnce([&] { dispatcher_->exit(); }); ClientConnectionPtr client_connection = dispatcher_->createClientConnection( - socket->addressProvider().localAddress(), Address::InstanceConstSharedPtr(), + socket->connectionInfoProvider().localAddress(), Address::InstanceConstSharedPtr(), Network::Test::createRawBufferSocket(), nullptr); client_connection->addConnectionCallbacks(connection_callbacks); client_connection->connect(); @@ -363,7 +364,7 @@ TEST_P(TcpListenerImplTest, SetListenerRejectFractionIntermediate) { { ClientConnectionPtr client_connection = dispatcher_->createClientConnection( - socket->addressProvider().localAddress(), Address::InstanceConstSharedPtr(), + socket->connectionInfoProvider().localAddress(), Address::InstanceConstSharedPtr(), Network::Test::createRawBufferSocket(), nullptr); client_connection->addConnectionCallbacks(connection_callbacks); client_connection->connect(); @@ -386,7 +387,7 @@ TEST_P(TcpListenerImplTest, SetListenerRejectFractionIntermediate) { { ClientConnectionPtr client_connection = dispatcher_->createClientConnection( - socket->addressProvider().localAddress(), Address::InstanceConstSharedPtr(), + socket->connectionInfoProvider().localAddress(), Address::InstanceConstSharedPtr(), Network::Test::createRawBufferSocket(), nullptr); 
client_connection->addConnectionCallbacks(connection_callbacks); client_connection->connect(); @@ -424,7 +425,7 @@ TEST_P(TcpListenerImplTest, SetListenerRejectFractionAll) { } ClientConnectionPtr client_connection = dispatcher_->createClientConnection( - socket->addressProvider().localAddress(), Address::InstanceConstSharedPtr(), + socket->connectionInfoProvider().localAddress(), Address::InstanceConstSharedPtr(), Network::Test::createRawBufferSocket(), nullptr); client_connection->addConnectionCallbacks(connection_callbacks); client_connection->connect(); diff --git a/test/common/network/socket_option_factory_test.cc b/test/common/network/socket_option_factory_test.cc index a32fae30b85a7..e89c895142164 100644 --- a/test/common/network/socket_option_factory_test.cc +++ b/test/common/network/socket_option_factory_test.cc @@ -33,13 +33,13 @@ class SocketOptionFactoryTest : public testing::Test { testing::NiceMock socket_mock_; Api::MockOsSysCalls os_sys_calls_mock_; - void SetUp() override { socket_mock_.address_provider_->setLocalAddress(nullptr); } + void SetUp() override { socket_mock_.connection_info_provider_->setLocalAddress(nullptr); } void makeSocketV4() { - socket_mock_.address_provider_->setLocalAddress( + socket_mock_.connection_info_provider_->setLocalAddress( std::make_unique("1.2.3.4", 5678)); } void makeSocketV6() { - socket_mock_.address_provider_->setLocalAddress( + socket_mock_.connection_info_provider_->setLocalAddress( std::make_unique("::1:2:3:4", 5678)); } }; diff --git a/test/common/network/socket_option_test.h b/test/common/network/socket_option_test.h index a13a9a76b61c9..9afd8dd2236e8 100644 --- a/test/common/network/socket_option_test.h +++ b/test/common/network/socket_option_test.h @@ -25,7 +25,7 @@ namespace { class SocketOptionTest : public testing::Test { public: SocketOptionTest() { - socket_.address_provider_->setLocalAddress(nullptr); + socket_.connection_info_provider_->setLocalAddress(nullptr); EXPECT_CALL(os_sys_calls_, socket(_, 
_, _)) .Times(AnyNumber()) diff --git a/test/common/network/udp_fuzz.cc b/test/common/network/udp_fuzz.cc index 67534ef927e70..a29f6bc6efcad 100644 --- a/test/common/network/udp_fuzz.cc +++ b/test/common/network/udp_fuzz.cc @@ -90,7 +90,7 @@ class UdpFuzz { dispatcherImpl().timeSource(), config); Network::Address::Instance* send_to_addr_ = new Network::Address::Ipv4Instance( - "127.0.0.1", server_socket_->addressProvider().localAddress()->ip()->port()); + "127.0.0.1", server_socket_->connectionInfoProvider().localAddress()->ip()->port()); // Now do all of the fuzzing static const int MaxPackets = 15; diff --git a/test/common/network/udp_listener_impl_batch_writer_test.cc b/test/common/network/udp_listener_impl_batch_writer_test.cc index 7e6f1ec296242..39b69e86c02af 100644 --- a/test/common/network/udp_listener_impl_batch_writer_test.cc +++ b/test/common/network/udp_listener_impl_batch_writer_test.cc @@ -197,8 +197,9 @@ TEST_P(UdpListenerImplBatchWriterTest, WriteBlocked) { // First have initial payload added to the udp_packet_writer's internal buffer. 
Buffer::InstancePtr initial_buffer(new Buffer::OwnedImpl()); initial_buffer->add(initial_payload); - UdpSendData initial_send_data{ - send_to_addr_->ip(), *server_socket_->addressProvider().localAddress(), *initial_buffer}; + UdpSendData initial_send_data{send_to_addr_->ip(), + *server_socket_->connectionInfoProvider().localAddress(), + *initial_buffer}; auto send_result = listener_->send(initial_send_data); internal_buffer.append(initial_payload); EXPECT_TRUE(send_result.ok()); @@ -221,8 +222,9 @@ TEST_P(UdpListenerImplBatchWriterTest, WriteBlocked) { // Now send the following payload Buffer::InstancePtr following_buffer(new Buffer::OwnedImpl()); following_buffer->add(following_payload); - UdpSendData following_send_data{ - send_to_addr_->ip(), *server_socket_->addressProvider().localAddress(), *following_buffer}; + UdpSendData following_send_data{send_to_addr_->ip(), + *server_socket_->connectionInfoProvider().localAddress(), + *following_buffer}; send_result = listener_->send(following_send_data); if (following_payload.length() < initial_payload.length()) { diff --git a/test/common/network/udp_listener_impl_test.cc b/test/common/network/udp_listener_impl_test.cc index 21e640de9c5a3..a16a13e8f5653 100644 --- a/test/common/network/udp_listener_impl_test.cc +++ b/test/common/network/udp_listener_impl_test.cc @@ -345,7 +345,7 @@ TEST_P(UdpListenerImplTest, UdpEcho) { TEST_P(UdpListenerImplTest, UdpListenerEnableDisable) { setup(); - auto const* server_ip = server_socket_->addressProvider().localAddress()->ip(); + auto const* server_ip = server_socket_->connectionInfoProvider().localAddress()->ip(); ASSERT_NE(server_ip, nullptr); // We first disable the listener and then send two packets. 
@@ -394,7 +394,7 @@ TEST_P(UdpListenerImplTest, UdpListenerEnableDisable) { TEST_P(UdpListenerImplTest, UdpListenerRecvMsgError) { setup(); - auto const* server_ip = server_socket_->addressProvider().localAddress()->ip(); + auto const* server_ip = server_socket_->connectionInfoProvider().localAddress()->ip(); ASSERT_NE(server_ip, nullptr); // When the `receive` system call returns an error, we expect the `onReceiveError` @@ -473,8 +473,8 @@ TEST_P(UdpListenerImplTest, SendDataError) { Buffer::InstancePtr buffer(new Buffer::OwnedImpl()); buffer->add(payload); // send data to itself - UdpSendData send_data{send_to_addr_->ip(), *server_socket_->addressProvider().localAddress(), - *buffer}; + UdpSendData send_data{send_to_addr_->ip(), + *server_socket_->connectionInfoProvider().localAddress(), *buffer}; // Inject mocked OsSysCalls implementation to mock a write failure. Api::MockOsSysCalls os_sys_calls; diff --git a/test/common/network/udp_listener_impl_test_base.h b/test/common/network/udp_listener_impl_test_base.h index 527018259cfa4..112f89d68dc30 100644 --- a/test/common/network/udp_listener_impl_test_base.h +++ b/test/common/network/udp_listener_impl_test_base.h @@ -42,11 +42,11 @@ class UdpListenerImplTestBase : public ListenerImplTestBase { if (version_ == Address::IpVersion::v4) { return new Address::Ipv4Instance( Network::Test::getLoopbackAddressString(version_), - server_socket_->addressProvider().localAddress()->ip()->port()); + server_socket_->connectionInfoProvider().localAddress()->ip()->port()); } return new Address::Ipv6Instance( Network::Test::getLoopbackAddressString(version_), - server_socket_->addressProvider().localAddress()->ip()->port()); + server_socket_->connectionInfoProvider().localAddress()->ip()->port()); } SocketSharedPtr createServerSocket(bool bind) { @@ -68,7 +68,7 @@ class UdpListenerImplTestBase : public ListenerImplTestBase { if (version_ == Address::IpVersion::v4) { // Linux kernel regards any 127.x.x.x as local address. 
But Mac OS doesn't. send_from_addr = std::make_shared( - "127.0.0.1", server_socket_->addressProvider().localAddress()->ip()->port()); + "127.0.0.1", server_socket_->connectionInfoProvider().localAddress()->ip()->port()); } else { // Only use non-local v6 address if IP_FREEBIND is supported. Otherwise use // ::1 to avoid EINVAL error. Unfortunately this can't verify that sendmsg with @@ -80,7 +80,7 @@ class UdpListenerImplTestBase : public ListenerImplTestBase { #else "::1", #endif - server_socket_->addressProvider().localAddress()->ip()->port()); + server_socket_->connectionInfoProvider().localAddress()->ip()->port()); } return send_from_addr; } diff --git a/test/common/network/utility_test.cc b/test/common/network/utility_test.cc index f75b8b33bbf82..fac1300cf3261 100644 --- a/test/common/network/utility_test.cc +++ b/test/common/network/utility_test.cc @@ -245,73 +245,73 @@ TEST(NetworkUtility, GetOriginalDst) { TEST(NetworkUtility, LocalConnection) { testing::NiceMock socket; - socket.address_provider_->setLocalAddress( + socket.connection_info_provider_->setLocalAddress( std::make_shared("127.0.0.1")); - socket.address_provider_->setRemoteAddress( + socket.connection_info_provider_->setRemoteAddress( std::make_shared("/pipe/path")); EXPECT_TRUE(Utility::isSameIpOrLoopback(socket)); - socket.address_provider_->setLocalAddress( + socket.connection_info_provider_->setLocalAddress( std::make_shared("/pipe/path")); - socket.address_provider_->setRemoteAddress( + socket.connection_info_provider_->setRemoteAddress( std::make_shared("/pipe/path")); EXPECT_TRUE(Utility::isSameIpOrLoopback(socket)); - socket.address_provider_->setLocalAddress( + socket.connection_info_provider_->setLocalAddress( std::make_shared("127.0.0.1")); - socket.address_provider_->setRemoteAddress( + socket.connection_info_provider_->setRemoteAddress( std::make_shared("127.0.0.1")); EXPECT_TRUE(Utility::isSameIpOrLoopback(socket)); - socket.address_provider_->setLocalAddress( + 
socket.connection_info_provider_->setLocalAddress( std::make_shared("127.0.0.2")); EXPECT_TRUE(Utility::isSameIpOrLoopback(socket)); - socket.address_provider_->setLocalAddress( + socket.connection_info_provider_->setLocalAddress( std::make_shared("4.4.4.4")); - socket.address_provider_->setRemoteAddress( + socket.connection_info_provider_->setRemoteAddress( std::make_shared("8.8.8.8")); EXPECT_FALSE(Utility::isSameIpOrLoopback(socket)); - socket.address_provider_->setLocalAddress( + socket.connection_info_provider_->setLocalAddress( std::make_shared("4.4.4.4")); - socket.address_provider_->setRemoteAddress( + socket.connection_info_provider_->setRemoteAddress( std::make_shared("4.4.4.4")); EXPECT_TRUE(Utility::isSameIpOrLoopback(socket)); - socket.address_provider_->setLocalAddress( + socket.connection_info_provider_->setLocalAddress( std::make_shared("4.4.4.4", 1234)); - socket.address_provider_->setRemoteAddress( + socket.connection_info_provider_->setRemoteAddress( std::make_shared("4.4.4.4", 4321)); EXPECT_TRUE(Utility::isSameIpOrLoopback(socket)); - socket.address_provider_->setLocalAddress( + socket.connection_info_provider_->setLocalAddress( std::make_shared("::1")); - socket.address_provider_->setRemoteAddress( + socket.connection_info_provider_->setRemoteAddress( std::make_shared("::1")); EXPECT_TRUE(Utility::isSameIpOrLoopback(socket)); - socket.address_provider_->setLocalAddress( + socket.connection_info_provider_->setLocalAddress( std::make_shared("::2")); - socket.address_provider_->setRemoteAddress( + socket.connection_info_provider_->setRemoteAddress( std::make_shared("::1")); EXPECT_TRUE(Utility::isSameIpOrLoopback(socket)); - socket.address_provider_->setRemoteAddress( + socket.connection_info_provider_->setRemoteAddress( std::make_shared("::3")); EXPECT_FALSE(Utility::isSameIpOrLoopback(socket)); - socket.address_provider_->setRemoteAddress( + socket.connection_info_provider_->setRemoteAddress( std::make_shared("::2")); 
EXPECT_TRUE(Utility::isSameIpOrLoopback(socket)); - socket.address_provider_->setRemoteAddress( + socket.connection_info_provider_->setRemoteAddress( std::make_shared("::2", 4321)); - socket.address_provider_->setLocalAddress( + socket.connection_info_provider_->setLocalAddress( std::make_shared("::2", 1234)); EXPECT_TRUE(Utility::isSameIpOrLoopback(socket)); - socket.address_provider_->setRemoteAddress( + socket.connection_info_provider_->setRemoteAddress( std::make_shared("fd00::")); EXPECT_FALSE(Utility::isSameIpOrLoopback(socket)); } diff --git a/test/common/protobuf/utility_test.cc b/test/common/protobuf/utility_test.cc index bbb9c9c8a9130..3d5875da1b223 100644 --- a/test/common/protobuf/utility_test.cc +++ b/test/common/protobuf/utility_test.cc @@ -184,34 +184,6 @@ TEST_F(ProtobufUtilityTest, MessageUtilHash) { EXPECT_NE(MessageUtil::hash(s), MessageUtil::hash(a1)); } -TEST_F(ProtobufUtilityTest, MessageUtilHashAndEqualToIgnoreOriginalTypeField) { - ProtobufWkt::Struct s; - (*s.mutable_fields())["ab"].set_string_value("fgh"); - EXPECT_EQ(1, s.fields_size()); - envoy::api::v2::core::Metadata mv2; - mv2.mutable_filter_metadata()->insert({"xyz", s}); - EXPECT_EQ(1, mv2.filter_metadata_size()); - - // Add the OriginalTypeFieldNumber as unknown field. - envoy::config::core::v3::Metadata mv3; - Config::VersionConverter::upgrade(mv2, mv3); - - // Add another unknown field. - { - const Protobuf::Reflection* reflection = mv3.GetReflection(); - auto* unknown_field_set = reflection->MutableUnknownFields(&mv3); - auto set_size = unknown_field_set->field_count(); - // 183412668 is the magic number OriginalTypeFieldNumber. The successor number should not be - // occupied. 
- unknown_field_set->AddFixed32(183412668 + 1, 1); - EXPECT_EQ(set_size + 1, unknown_field_set->field_count()) << "Fail to add an unknown field"; - } - - envoy::config::core::v3::Metadata mv3dup = mv3; - ASSERT_EQ(MessageUtil::hash(mv3), MessageUtil::hash(mv3dup)); - ASSERT(MessageUtil()(mv3, mv3dup)); -} - TEST_F(ProtobufUtilityTest, RepeatedPtrUtilDebugString) { Protobuf::RepeatedPtrField repeated; EXPECT_EQ("[]", RepeatedPtrUtil::debugString(repeated)); @@ -381,15 +353,8 @@ TEST_F(ProtobufUtilityTest, LoadBinaryProtoUnknownFieldFromFile) { source_duration.set_seconds(42); const std::string filename = TestEnvironment::writeStringToFileForTest("proto.pb", source_duration.SerializeAsString()); - // Verify without boosting envoy::config::bootstrap::v3::Bootstrap proto_from_file; - EXPECT_THROW_WITH_MESSAGE(TestUtility::loadFromFile(filename, proto_from_file, *api_, false), - EnvoyException, - "Protobuf message (type envoy.config.bootstrap.v3.Bootstrap with " - "unknown field set {1}) has unknown fields"); - - // Verify with boosting - EXPECT_THROW_WITH_MESSAGE(TestUtility::loadFromFile(filename, proto_from_file, *api_, true), + EXPECT_THROW_WITH_MESSAGE(TestUtility::loadFromFile(filename, proto_from_file, *api_), EnvoyException, "Protobuf message (type envoy.config.bootstrap.v3.Bootstrap with " "unknown field set {1}) has unknown fields"); @@ -446,21 +411,6 @@ TEST_F(ProtobufUtilityTest, LoadJsonFromFileNoBoosting) { EXPECT_TRUE(TestUtility::protoEqual(bootstrap, proto_from_file)); } -TEST_F(ProtobufV2ApiUtilityTest, DEPRECATED_FEATURE_TEST(LoadV2TextProtoFromFile)) { - API_NO_BOOST(envoy::config::bootstrap::v2::Bootstrap) bootstrap; - bootstrap.mutable_node()->set_build_version("foo"); - - std::string bootstrap_text; - ASSERT_TRUE(Protobuf::TextFormat::PrintToString(bootstrap, &bootstrap_text)); - const std::string filename = - TestEnvironment::writeStringToFileForTest("proto.pb_text", bootstrap_text); - - API_NO_BOOST(envoy::config::bootstrap::v3::Bootstrap) 
proto_from_file; - TestUtility::loadFromFile(filename, proto_from_file, *api_); - EXPECT_GT(runtime_deprecated_feature_use_.value(), 0); - EXPECT_EQ("foo", proto_from_file.node().hidden_envoy_deprecated_build_version()); -} - TEST_F(ProtobufUtilityTest, LoadTextProtoFromFile_Failure) { const std::string filename = TestEnvironment::writeStringToFileForTest("proto.pb_text", "invalid {"); @@ -1416,56 +1366,6 @@ TEST_F(ProtobufUtilityTest, UnpackToSameVersion) { } } -// MessageUtility::unpackTo() with API message works across version. -TEST_F(ProtobufV2ApiUtilityTest, UnpackToNextVersion) { - API_NO_BOOST(envoy::api::v2::Cluster) source; - source.set_drain_connections_on_host_removal(true); - ProtobufWkt::Any source_any; - source_any.PackFrom(source); - API_NO_BOOST(envoy::config::cluster::v3::Cluster) dst; - MessageUtil::unpackTo(source_any, dst); - EXPECT_GT(runtime_deprecated_feature_use_.value(), 0); - EXPECT_TRUE(dst.ignore_health_on_host_removal()); -} - -// MessageUtility::unpackTo() with API message works across version and doesn't register -// deprecations for allowlisted v2 protos. -TEST_F(ProtobufV2ApiUtilityTest, UnpackToNextVersionV2Allowed) { - API_NO_BOOST(envoy::config::health_checker::redis::v2::Redis) source; - source.set_key("foo"); - ProtobufWkt::Any source_any; - source_any.PackFrom(source); - API_NO_BOOST(envoy::extensions::health_checkers::redis::v3::Redis) dst; - MessageUtil::unpackTo(source_any, dst); - EXPECT_EQ(runtime_deprecated_feature_use_.value(), 0); - EXPECT_EQ(dst.key(), "foo"); -} - -// Validate warning messages on v2 upgrades. -TEST_F(ProtobufV2ApiUtilityTest, V2UpgradeWarningLogs) { - API_NO_BOOST(envoy::config::cluster::v3::Cluster) dst; - // First attempt works. - EXPECT_LOG_CONTAINS("warn", "Configuration does not parse cleanly as v3", - MessageUtil::loadFromJson("{drain_connections_on_host_removal: true}", dst, - ProtobufMessage::getNullValidationVisitor())); - // Second attempt immediately after fails. 
- EXPECT_LOG_NOT_CONTAINS("warn", "Configuration does not parse cleanly as v3", - MessageUtil::loadFromJson("{drain_connections_on_host_removal: true}", - dst, - ProtobufMessage::getNullValidationVisitor())); - // Third attempt works, since this is a different log message. - EXPECT_LOG_CONTAINS("warn", "Configuration does not parse cleanly as v3", - MessageUtil::loadFromJson("{drain_connections_on_host_removal: false}", dst, - ProtobufMessage::getNullValidationVisitor())); - // This is kind of terrible, but it's hard to do dependency injection at - // onVersionUpgradeDeprecation(). - std::this_thread::sleep_for(5s); // NOLINT - // We can log the original warning again. - EXPECT_LOG_CONTAINS("warn", "Configuration does not parse cleanly as v3", - MessageUtil::loadFromJson("{drain_connections_on_host_removal: true}", dst, - ProtobufMessage::getNullValidationVisitor())); -} - // MessageUtility::loadFromJson() throws on garbage JSON. TEST_F(ProtobufUtilityTest, LoadFromJsonGarbage) { envoy::config::cluster::v3::Cluster dst; @@ -1511,42 +1411,10 @@ TEST_F(ProtobufUtilityTest, LoadFromJsonNoBoosting) { envoy::config::cluster::v3::Cluster dst; EXPECT_THROW_WITH_REGEX( MessageUtil::loadFromJson("{drain_connections_on_host_removal: true}", dst, - ProtobufMessage::getStrictValidationVisitor(), false), + ProtobufMessage::getStrictValidationVisitor()), EnvoyException, "INVALID_ARGUMENT:drain_connections_on_host_removal: Cannot find field."); } -// MessageUtility::loadFromJson() with API message works across version. 
-TEST_F(ProtobufV2ApiUtilityTest, LoadFromJsonNextVersion) { - { - API_NO_BOOST(envoy::config::cluster::v3::Cluster) dst; - MessageUtil::loadFromJson("{use_tcp_for_dns_lookups: true}", dst, - ProtobufMessage::getNullValidationVisitor()); - EXPECT_EQ(0, runtime_deprecated_feature_use_.value()); - EXPECT_TRUE(dst.use_tcp_for_dns_lookups()); - } - { - API_NO_BOOST(envoy::config::cluster::v3::Cluster) dst; - MessageUtil::loadFromJson("{use_tcp_for_dns_lookups: true}", dst, - ProtobufMessage::getStrictValidationVisitor()); - EXPECT_EQ(0, runtime_deprecated_feature_use_.value()); - EXPECT_TRUE(dst.use_tcp_for_dns_lookups()); - } - { - API_NO_BOOST(envoy::config::cluster::v3::Cluster) dst; - MessageUtil::loadFromJson("{drain_connections_on_host_removal: true}", dst, - ProtobufMessage::getNullValidationVisitor()); - EXPECT_GT(runtime_deprecated_feature_use_.value(), 0); - EXPECT_TRUE(dst.ignore_health_on_host_removal()); - } - { - API_NO_BOOST(envoy::config::cluster::v3::Cluster) dst; - MessageUtil::loadFromJson("{drain_connections_on_host_removal: true}", dst, - ProtobufMessage::getStrictValidationVisitor()); - EXPECT_GT(runtime_deprecated_feature_use_.value(), 0); - EXPECT_TRUE(dst.ignore_health_on_host_removal()); - } -} - TEST_F(ProtobufUtilityTest, JsonConvertSuccess) { envoy::config::bootstrap::v3::Bootstrap source; source.set_flags_path("foo"); @@ -1692,27 +1560,14 @@ TEST(DurationUtilTest, OutOfRange) { } } -class DeprecatedFieldsTest : public testing::TestWithParam, protected RuntimeStatsHelper { +class DeprecatedFieldsTest : public testing::Test, protected RuntimeStatsHelper { protected: - DeprecatedFieldsTest() : with_upgrade_(GetParam()) {} - void checkForDeprecation(const Protobuf::Message& message) { - if (with_upgrade_) { - envoy::test::deprecation_test::UpgradedBase upgraded_message; - Config::VersionConverter::upgrade(message, upgraded_message); - MessageUtil::checkForUnexpectedFields(upgraded_message, - ProtobufMessage::getStrictValidationVisitor()); - } 
else { - MessageUtil::checkForUnexpectedFields(message, ProtobufMessage::getStrictValidationVisitor()); - } + MessageUtil::checkForUnexpectedFields(message, ProtobufMessage::getStrictValidationVisitor()); } - - const bool with_upgrade_; }; -INSTANTIATE_TEST_SUITE_P(Versions, DeprecatedFieldsTest, testing::Values(false, true)); - -TEST_P(DeprecatedFieldsTest, NoCrashIfRuntimeMissing) { +TEST_F(DeprecatedFieldsTest, NoCrashIfRuntimeMissing) { loader_.reset(); envoy::test::deprecation_test::Base base; @@ -1721,7 +1576,7 @@ TEST_P(DeprecatedFieldsTest, NoCrashIfRuntimeMissing) { checkForDeprecation(base); } -TEST_P(DeprecatedFieldsTest, NoErrorWhenDeprecatedFieldsUnused) { +TEST_F(DeprecatedFieldsTest, NoErrorWhenDeprecatedFieldsUnused) { envoy::test::deprecation_test::Base base; base.set_not_deprecated("foo"); // Fatal checks for a non-deprecated field should cause no problem. @@ -1730,7 +1585,7 @@ TEST_P(DeprecatedFieldsTest, NoErrorWhenDeprecatedFieldsUnused) { EXPECT_EQ(0, deprecated_feature_seen_since_process_start_.value()); } -TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(IndividualFieldDeprecatedEmitsError)) { +TEST_F(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(IndividualFieldDeprecatedEmitsError)) { envoy::test::deprecation_test::Base base; base.set_is_deprecated("foo"); // Non-fatal checks for a deprecated field should log rather than throw an exception. 
@@ -1741,7 +1596,7 @@ TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(IndividualFieldDeprecatedEm EXPECT_EQ(1, deprecated_feature_seen_since_process_start_.value()); } -TEST_P(DeprecatedFieldsTest, IndividualFieldDeprecatedEmitsCrash) { +TEST_F(DeprecatedFieldsTest, IndividualFieldDeprecatedEmitsCrash) { envoy::test::deprecation_test::Base base; base.set_is_deprecated("foo"); // Non-fatal checks for a deprecated field should throw an exception if the @@ -1757,7 +1612,7 @@ TEST_P(DeprecatedFieldsTest, IndividualFieldDeprecatedEmitsCrash) { } // Use of a deprecated and disallowed field should result in an exception. -TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(IndividualFieldDisallowed)) { +TEST_F(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(IndividualFieldDisallowed)) { envoy::test::deprecation_test::Base base; base.set_is_deprecated_fatal("foo"); EXPECT_THROW_WITH_REGEX( @@ -1765,7 +1620,7 @@ TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(IndividualFieldDisallowed)) "Using deprecated option 'envoy.test.deprecation_test.Base.is_deprecated_fatal'"); } -TEST_P(DeprecatedFieldsTest, +TEST_F(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(IndividualFieldDisallowedWithRuntimeOverride)) { envoy::test::deprecation_test::Base base; base.set_is_deprecated_fatal("foo"); @@ -1792,7 +1647,7 @@ TEST_P(DeprecatedFieldsTest, } // Test that a deprecated field is allowed with runtime global override. 
-TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(IndividualFieldDisallowedWithGlobalOverride)) { +TEST_F(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(IndividualFieldDisallowedWithGlobalOverride)) { envoy::test::deprecation_test::Base base; base.set_is_deprecated_fatal("foo"); @@ -1816,7 +1671,7 @@ TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(IndividualFieldDisallowedWi EXPECT_EQ(1, runtime_deprecated_feature_use_.value()); } -TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(DisallowViaRuntime)) { +TEST_F(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(DisallowViaRuntime)) { envoy::test::deprecation_test::Base base; base.set_is_deprecated("foo"); @@ -1848,7 +1703,7 @@ TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(DisallowViaRuntime)) { // Note that given how Envoy config parsing works, the first time we hit a // 'fatal' error and throw, we won't log future warnings. That said, this tests // the case of the warning occurring before the fatal error. -TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(MixOfFatalAndWarnings)) { +TEST_F(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(MixOfFatalAndWarnings)) { envoy::test::deprecation_test::Base base; base.set_is_deprecated("foo"); base.set_is_deprecated_fatal("foo"); @@ -1861,7 +1716,7 @@ TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(MixOfFatalAndWarnings)) { } // Present (unused) deprecated messages should be detected as deprecated. 
-TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(MessageDeprecated)) { +TEST_F(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(MessageDeprecated)) { envoy::test::deprecation_test::Base base; base.mutable_deprecated_message(); EXPECT_LOG_CONTAINS( @@ -1870,7 +1725,7 @@ TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(MessageDeprecated)) { EXPECT_EQ(1, runtime_deprecated_feature_use_.value()); } -TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(InnerMessageDeprecated)) { +TEST_F(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(InnerMessageDeprecated)) { envoy::test::deprecation_test::Base base; base.mutable_not_deprecated_message()->set_inner_not_deprecated("foo"); // Checks for a non-deprecated field shouldn't trigger warnings @@ -1885,7 +1740,7 @@ TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(InnerMessageDeprecated)) { } // Check that repeated sub-messages get validated. -TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(SubMessageDeprecated)) { +TEST_F(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(SubMessageDeprecated)) { envoy::test::deprecation_test::Base base; base.add_repeated_message(); base.add_repeated_message()->set_inner_deprecated("foo"); @@ -1899,7 +1754,7 @@ TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(SubMessageDeprecated)) { } // Check that deprecated repeated messages trigger -TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(RepeatedMessageDeprecated)) { +TEST_F(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(RepeatedMessageDeprecated)) { envoy::test::deprecation_test::Base base; base.add_deprecated_repeated_message(); @@ -1911,7 +1766,7 @@ TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(RepeatedMessageDeprecated)) } // Check that deprecated enum values trigger for default values -TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(EnumValuesDeprecatedDefault)) { +TEST_F(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(EnumValuesDeprecatedDefault)) { envoy::test::deprecation_test::Base base; 
base.mutable_enum_container(); @@ -1925,7 +1780,7 @@ TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(EnumValuesDeprecatedDefault } // Check that deprecated enum values trigger for non-default values -TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(EnumValuesDeprecated)) { +TEST_F(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(EnumValuesDeprecated)) { envoy::test::deprecation_test::Base base; base.mutable_enum_container()->set_deprecated_enum( envoy::test::deprecation_test::Base::DEPRECATED_NOT_DEFAULT); @@ -1940,7 +1795,7 @@ TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(EnumValuesDeprecated)) { // Make sure the runtime overrides for protos work, by checking the non-fatal to // fatal option. -TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(RuntimeOverrideEnumDefault)) { +TEST_F(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(RuntimeOverrideEnumDefault)) { envoy::test::deprecation_test::Base base; base.mutable_enum_container(); @@ -1963,7 +1818,7 @@ TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(RuntimeOverrideEnumDefault) } // Make sure the runtime overrides for allowing fatal enums work. -TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(FatalEnum)) { +TEST_F(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(FatalEnum)) { envoy::test::deprecation_test::Base base; base.mutable_enum_container()->set_deprecated_enum( envoy::test::deprecation_test::Base::DEPRECATED_FATAL); @@ -1984,7 +1839,7 @@ TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(FatalEnum)) { } // Make sure the runtime global override for allowing fatal enums work. 
-TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(FatalEnumGlobalOverride)) { +TEST_F(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(FatalEnumGlobalOverride)) { envoy::test::deprecation_test::Base base; base.mutable_enum_container()->set_deprecated_enum( envoy::test::deprecation_test::Base::DEPRECATED_FATAL); @@ -2006,7 +1861,7 @@ TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(FatalEnumGlobalOverride)) { // Verify that direct use of a hidden_envoy_deprecated field fails, but upgrade // succeeds -TEST_P(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(ManualDeprecatedFieldAddition)) { +TEST_F(DeprecatedFieldsTest, DEPRECATED_FEATURE_TEST(ManualDeprecatedFieldAddition)) { // Create a base message and insert a deprecated field. When upgrading the // deprecated field should be set as deprecated, and a warning should be logged envoy::test::deprecation_test::Base base_should_warn = diff --git a/test/common/quic/BUILD b/test/common/quic/BUILD index 943fd4b241470..4c16057ecbba6 100644 --- a/test/common/quic/BUILD +++ b/test/common/quic/BUILD @@ -49,8 +49,8 @@ envoy_cc_test( "//source/extensions/transport_sockets/tls:context_config_lib", "//test/mocks/network:network_mocks", "//test/mocks/ssl:ssl_mocks", - "@com_googlesource_quiche//:quic_core_versions_lib", - "@com_googlesource_quiche//:quic_test_tools_test_certificates_lib", + "@com_github_google_quiche//:quic_core_versions_lib", + "@com_github_google_quiche//:quic_test_tools_test_certificates_lib", ], ) @@ -85,7 +85,7 @@ envoy_cc_test( "//source/common/quic:envoy_quic_proof_verifier_lib", "//source/extensions/transport_sockets/tls:context_config_lib", "//test/mocks/ssl:ssl_mocks", - "@com_googlesource_quiche//:quic_test_tools_test_certificates_lib", + "@com_github_google_quiche//:quic_test_tools_test_certificates_lib", ], ) @@ -106,9 +106,9 @@ envoy_cc_test( "//test/mocks/http:stream_decoder_mock", "//test/mocks/network:network_mocks", "//test/test_common:utility_lib", - 
"@com_googlesource_quiche//:quic_core_http_spdy_session_lib", - "@com_googlesource_quiche//:quic_test_tools_qpack_qpack_test_utils_lib", - "@com_googlesource_quiche//:quic_test_tools_session_peer_lib", + "@com_github_google_quiche//:quic_core_http_spdy_session_lib", + "@com_github_google_quiche//:quic_test_tools_qpack_qpack_test_utils_lib", + "@com_github_google_quiche//:quic_test_tools_session_peer_lib", ], ) @@ -128,8 +128,8 @@ envoy_cc_test( "//test/mocks/http:stream_decoder_mock", "//test/mocks/network:network_mocks", "//test/test_common:utility_lib", - "@com_googlesource_quiche//:quic_core_http_spdy_session_lib", - "@com_googlesource_quiche//:quic_test_tools_qpack_qpack_test_utils_lib", + "@com_github_google_quiche//:quic_core_http_spdy_session_lib", + "@com_github_google_quiche//:quic_test_tools_qpack_qpack_test_utils_lib", ], ) @@ -156,9 +156,9 @@ envoy_cc_test( "//test/test_common:global_lib", "//test/test_common:logging_lib", "//test/test_common:simulated_time_system_lib", - "@com_googlesource_quiche//:quic_test_tools_config_peer_lib", - "@com_googlesource_quiche//:quic_test_tools_server_session_base_peer", - "@com_googlesource_quiche//:quic_test_tools_test_utils_lib", + "@com_github_google_quiche//:quic_test_tools_config_peer_lib", + "@com_github_google_quiche//:quic_test_tools_server_session_base_peer", + "@com_github_google_quiche//:quic_test_tools_test_utils_lib", ], ) @@ -182,6 +182,7 @@ envoy_cc_test( "//test/mocks/stats:stats_mocks", "//test/test_common:logging_lib", "//test/test_common:simulated_time_system_lib", + "@com_github_google_quiche//:quic_test_tools_session_peer_lib", ], ) @@ -204,7 +205,7 @@ envoy_cc_test( "//test/mocks/server:instance_mocks", "//test/test_common:network_utility_lib", "//test/test_common:simulated_time_system_lib", - "@com_googlesource_quiche//:quic_test_tools_crypto_server_config_peer_lib", + "@com_github_google_quiche//:quic_test_tools_crypto_server_config_peer_lib", "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", 
], ) @@ -241,7 +242,7 @@ envoy_cc_test_library( deps = [ "//source/common/quic:envoy_quic_proof_source_base_lib", "//test/mocks/network:network_mocks", - "@com_googlesource_quiche//:quic_test_tools_test_certificates_lib", + "@com_github_google_quiche//:quic_test_tools_test_certificates_lib", ], ) @@ -261,7 +262,7 @@ envoy_cc_test_library( deps = [ ":test_proof_source_lib", ":test_proof_verifier_lib", - "@com_googlesource_quiche//:quic_test_tools_test_utils_lib", + "@com_github_google_quiche//:quic_test_tools_test_utils_lib", ], ) @@ -330,8 +331,8 @@ envoy_cc_test_library( "//source/common/quic:envoy_quic_server_connection_lib", "//source/common/quic:quic_filter_manager_connection_lib", "//test/test_common:environment_lib", - "@com_googlesource_quiche//:quic_core_http_spdy_session_lib", - "@com_googlesource_quiche//:quic_test_tools_first_flight_lib", - "@com_googlesource_quiche//:quic_test_tools_qpack_qpack_encoder_test_utils_lib", + "@com_github_google_quiche//:quic_core_http_spdy_session_lib", + "@com_github_google_quiche//:quic_test_tools_first_flight_lib", + "@com_github_google_quiche//:quic_test_tools_qpack_qpack_encoder_test_utils_lib", ], ) diff --git a/test/common/quic/active_quic_listener_test.cc b/test/common/quic/active_quic_listener_test.cc index 13da84735c546..06b49bd5cac6a 100644 --- a/test/common/quic/active_quic_listener_test.cc +++ b/test/common/quic/active_quic_listener_test.cc @@ -73,21 +73,14 @@ class ActiveQuicListenerFactoryPeer { } }; -class ActiveQuicListenerTest : public QuicMultiVersionTest { +class ActiveQuicListenerTest : public testing::TestWithParam { protected: ActiveQuicListenerTest() - : version_(GetParam().first), api_(Api::createApiForTest(simulated_time_system_)), + : version_(GetParam()), api_(Api::createApiForTest(simulated_time_system_)), dispatcher_(api_->allocateDispatcher("test_thread")), clock_(*dispatcher_), local_address_(Network::Test::getCanonicalLoopbackAddress(version_)), - connection_handler_(*dispatcher_, 
absl::nullopt), quic_version_([]() { - if (GetParam().second == QuicVersionType::GquicQuicCrypto) { - return quic::CurrentSupportedVersionsWithQuicCrypto(); - } - bool use_http3 = GetParam().second == QuicVersionType::Iquic; - SetQuicReloadableFlag(quic_disable_version_draft_29, !use_http3); - SetQuicReloadableFlag(quic_disable_version_rfcv1, !use_http3); - return quic::CurrentSupportedVersions(); - }()[0]), + connection_handler_(*dispatcher_, absl::nullopt), + quic_version_(quic::CurrentSupportedHttp3Versions()[0]), quic_stat_names_(listener_config_.listenerScope().symbolTable()) {} template @@ -209,16 +202,14 @@ class ActiveQuicListenerTest : public QuicMultiVersionTest { void sendCHLO(quic::QuicConnectionId connection_id) { client_sockets_.push_back(std::make_unique(Network::Socket::Type::Datagram, local_address_, nullptr)); - Buffer::OwnedImpl payload = generateChloPacketToSend( - quic_version_, quic_config_, ActiveQuicListenerPeer::cryptoConfig(*quic_listener_), - connection_id, clock_, envoyIpAddressToQuicSocketAddress(local_address_->ip()), - envoyIpAddressToQuicSocketAddress(local_address_->ip()), "test.example.org"); + Buffer::OwnedImpl payload = + generateChloPacketToSend(quic_version_, quic_config_, connection_id); Buffer::RawSliceVector slice = payload.getRawSlices(); ASSERT_EQ(1u, slice.size()); // Send a full CHLO to finish 0-RTT handshake. 
- auto send_rc = - Network::Utility::writeToSocket(client_sockets_.back()->ioHandle(), slice.data(), 1, - nullptr, *listen_socket_->addressProvider().localAddress()); + auto send_rc = Network::Utility::writeToSocket( + client_sockets_.back()->ioHandle(), slice.data(), 1, nullptr, + *listen_socket_->connectionInfoProvider().localAddress()); ASSERT_EQ(slice[0].len_, send_rc.return_value_); #if defined(__APPLE__) @@ -332,7 +323,8 @@ class ActiveQuicListenerTest : public QuicMultiVersionTest { }; INSTANTIATE_TEST_SUITE_P(ActiveQuicListenerTests, ActiveQuicListenerTest, - testing::ValuesIn(generateTestParam()), testParamsToString); + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); TEST_P(ActiveQuicListenerTest, ReceiveCHLO) { initialize(); @@ -355,22 +347,14 @@ TEST_P(ActiveQuicListenerTest, ReceiveCHLO) { EXPECT_EQ(quic::kMinimumFlowControlSendWindow, const_cast(session) ->config() ->GetInitialSessionFlowControlWindowToSend()); - // IETF Quic supports low flow control limit. But Google Quic only supports flow control window no - // smaller than 16kB. - if (GetParam().second == QuicVersionType::Iquic) { - EXPECT_EQ(stream_window_size_, const_cast(session) - ->config() - ->GetInitialMaxStreamDataBytesIncomingBidirectionalToSend()); - } else { - EXPECT_EQ(quic::kMinimumFlowControlSendWindow, const_cast(session) - ->config() - ->GetInitialStreamFlowControlWindowToSend()); - } + EXPECT_EQ(stream_window_size_, const_cast(session) + ->config() + ->GetInitialMaxStreamDataBytesIncomingBidirectionalToSend()); readFromClientSockets(); } TEST_P(ActiveQuicListenerTest, ConfigureReasonableInitialFlowControlWindow) { - // These initial flow control windows should be accepted by both Google QUIC and IETF QUIC. + // These initial flow control windows should be accepted by QUIC. 
connection_window_size_ = 64 * 1024; stream_window_size_ = 32 * 1024; initialize(); @@ -464,7 +448,8 @@ class ActiveQuicListenerEmptyFlagConfigTest : public ActiveQuicListenerTest { INSTANTIATE_TEST_SUITE_P(ActiveQuicListenerEmptyFlagConfigTests, ActiveQuicListenerEmptyFlagConfigTest, - testing::ValuesIn(generateTestParam()), testParamsToString); + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); // Quic listener should be enabled by default, if not enabled explicitly in config. TEST_P(ActiveQuicListenerEmptyFlagConfigTest, ReceiveFullQuicCHLO) { diff --git a/test/common/quic/envoy_quic_client_session_test.cc b/test/common/quic/envoy_quic_client_session_test.cc index 812818016fe2f..92f54a87f948c 100644 --- a/test/common/quic/envoy_quic_client_session_test.cc +++ b/test/common/quic/envoy_quic_client_session_test.cc @@ -6,6 +6,7 @@ #include "quiche/quic/core/crypto/null_encrypter.h" #include "quiche/quic/test_tools/crypto_test_utils.h" +#include "quiche/quic/test_tools/quic_session_peer.h" #include "quiche/quic/test_tools/quic_test_utils.h" #if defined(__GNUC__) @@ -64,15 +65,14 @@ class TestEnvoyQuicClientConnection : public EnvoyQuicClientConnection { using EnvoyQuicClientConnection::connectionStats; }; -class EnvoyQuicClientSessionTest : public testing::TestWithParam { +class EnvoyQuicClientSessionTest : public testing::Test { public: EnvoyQuicClientSessionTest() : api_(Api::createApiForTest(time_system_)), dispatcher_(api_->allocateDispatcher("test_thread")), connection_helper_(*dispatcher_), alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() { - SetQuicReloadableFlag(quic_disable_version_draft_29, !GetParam()); - SetQuicReloadableFlag(quic_disable_version_rfcv1, !GetParam()); - return quic::ParsedVersionOfIndex(quic::CurrentSupportedVersions(), 0); + SetQuicReloadableFlag(quic_decline_server_push_stream, true); + return quic::CurrentSupportedHttp3Versions(); }()), 
peer_addr_(Network::Utility::getAddressWithPort(*Network::Utility::getIpv6LoopbackAddress(), 12345)), @@ -163,10 +163,7 @@ class EnvoyQuicClientSessionTest : public testing::TestWithParam { QuicHttpClientConnectionImpl http_connection_; }; -INSTANTIATE_TEST_SUITE_P(EnvoyQuicClientSessionTests, EnvoyQuicClientSessionTest, - testing::ValuesIn({true, false})); - -TEST_P(EnvoyQuicClientSessionTest, NewStream) { +TEST_F(EnvoyQuicClientSessionTest, NewStream) { Http::MockResponseDecoder response_decoder; Http::MockStreamCallbacks stream_callbacks; EnvoyQuicClientStream& stream = sendGetRequest(response_decoder, stream_callbacks); @@ -183,7 +180,7 @@ TEST_P(EnvoyQuicClientSessionTest, NewStream) { stream.OnStreamHeaderList(/*fin=*/true, headers.uncompressed_header_bytes(), headers); } -TEST_P(EnvoyQuicClientSessionTest, PacketLimits) { +TEST_F(EnvoyQuicClientSessionTest, PacketLimits) { // We always allow for reading packets, even if there's no stream. EXPECT_EQ(0, envoy_quic_session_.GetNumActiveStreams()); EXPECT_EQ(16, envoy_quic_session_.numPacketsExpectedPerEventLoop()); @@ -222,7 +219,7 @@ TEST_P(EnvoyQuicClientSessionTest, PacketLimits) { envoy_quic_session_.close(Network::ConnectionCloseType::NoFlush); } -TEST_P(EnvoyQuicClientSessionTest, OnResetFrame) { +TEST_F(EnvoyQuicClientSessionTest, OnResetFrame) { Http::MockResponseDecoder response_decoder; Http::MockStreamCallbacks stream_callbacks; EnvoyQuicClientStream& stream = sendGetRequest(response_decoder, stream_callbacks); @@ -232,23 +229,40 @@ TEST_P(EnvoyQuicClientSessionTest, OnResetFrame) { quic::QuicRstStreamFrame rst1(/*control_frame_id=*/1u, stream_id, quic::QUIC_ERROR_PROCESSING_STREAM, /*bytes_written=*/0u); EXPECT_CALL(stream_callbacks, onResetStream(Http::StreamResetReason::RemoteReset, _)); - stream.OnStreamReset(rst1); + envoy_quic_session_.OnRstStream(rst1); + + EXPECT_EQ( + 1U, TestUtility::findCounter( + store_, "http3.upstream.rx.quic_reset_stream_error_code_QUIC_ERROR_PROCESSING_STREAM") + 
->value()); +} + +TEST_F(EnvoyQuicClientSessionTest, SendResetFrame) { + Http::MockResponseDecoder response_decoder; + Http::MockStreamCallbacks stream_callbacks; + EnvoyQuicClientStream& stream = sendGetRequest(response_decoder, stream_callbacks); + + // IETF bi-directional stream. + quic::QuicStreamId stream_id = stream.id(); + EXPECT_CALL(stream_callbacks, onResetStream(Http::StreamResetReason::LocalReset, _)); + EXPECT_CALL(*quic_connection_, SendControlFrame(_)); + envoy_quic_session_.ResetStream(stream_id, quic::QUIC_ERROR_PROCESSING_STREAM); + + EXPECT_EQ( + 1U, TestUtility::findCounter( + store_, "http3.upstream.tx.quic_reset_stream_error_code_QUIC_ERROR_PROCESSING_STREAM") + ->value()); } -TEST_P(EnvoyQuicClientSessionTest, OnGoAwayFrame) { +TEST_F(EnvoyQuicClientSessionTest, OnGoAwayFrame) { Http::MockResponseDecoder response_decoder; Http::MockStreamCallbacks stream_callbacks; EXPECT_CALL(http_connection_callbacks_, onGoAway(Http::GoAwayErrorCode::NoError)); - if (quic::VersionUsesHttp3(quic_version_[0].transport_version)) { - envoy_quic_session_.OnHttp3GoAway(4u); - } else { - quic::QuicGoAwayFrame goaway; - quic_connection_->OnGoAwayFrame(goaway); - } + envoy_quic_session_.OnHttp3GoAway(4u); } -TEST_P(EnvoyQuicClientSessionTest, ConnectionClose) { +TEST_F(EnvoyQuicClientSessionTest, ConnectionClose) { std::string error_details("dummy details"); quic::QuicErrorCode error(quic::QUIC_INVALID_FRAME_DATA); quic::QuicConnectionCloseFrame frame(quic_version_[0].transport_version, error, @@ -266,7 +280,7 @@ TEST_P(EnvoyQuicClientSessionTest, ConnectionClose) { ->value()); } -TEST_P(EnvoyQuicClientSessionTest, ConnectionCloseWithActiveStream) { +TEST_F(EnvoyQuicClientSessionTest, ConnectionCloseWithActiveStream) { Http::MockResponseDecoder response_decoder; Http::MockStreamCallbacks stream_callbacks; EnvoyQuicClientStream& stream = sendGetRequest(response_decoder, stream_callbacks); @@ -277,98 +291,32 @@ TEST_P(EnvoyQuicClientSessionTest, 
ConnectionCloseWithActiveStream) { envoy_quic_session_.close(Network::ConnectionCloseType::NoFlush); EXPECT_EQ(Network::Connection::State::Closed, envoy_quic_session_.state()); EXPECT_TRUE(stream.write_side_closed() && stream.reading_stopped()); + EXPECT_EQ(1U, TestUtility::findCounter( + store_, "http3.upstream.tx.quic_connection_close_error_code_QUIC_NO_ERROR") + ->value()); } -class EnvoyQuicClientSessionAllQuicVersionTest - : public testing::TestWithParam { -public: - EnvoyQuicClientSessionAllQuicVersionTest() - : api_(Api::createApiForTest(time_system_)), - dispatcher_(api_->allocateDispatcher("test_thread")), connection_helper_(*dispatcher_), - alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), - peer_addr_(Network::Utility::getAddressWithPort(*Network::Utility::getIpv6LoopbackAddress(), - 12345)), - self_addr_(Network::Utility::getAddressWithPort(*Network::Utility::getIpv6LoopbackAddress(), - 54321)), - quic_connection_(new TestEnvoyQuicClientConnection( - quic::test::TestConnectionId(), connection_helper_, alarm_factory_, writer_, - quic::test::SupportedVersions(GetParam()), *dispatcher_, - createConnectionSocket(peer_addr_, self_addr_, nullptr))), - crypto_config_(std::make_shared( - quic::test::crypto_test_utils::ProofVerifierForTesting())), - quic_stat_names_(store_.symbolTable()), - envoy_quic_session_( - quic_config_, quic::test::SupportedVersions(GetParam()), - std::unique_ptr(quic_connection_), - quic::QuicServerId("example.com", 443, false), crypto_config_, nullptr, *dispatcher_, - /*send_buffer_limit*/ 1024 * 1024, crypto_stream_factory_, quic_stat_names_, store_), - stats_({ALL_HTTP3_CODEC_STATS(POOL_COUNTER_PREFIX(store_, "http3."), - POOL_GAUGE_PREFIX(store_, "http3."))}), - http_connection_(envoy_quic_session_, http_connection_callbacks_, stats_, http3_options_, - 64 * 1024, 100) { - EXPECT_EQ(time_system_.systemTime(), envoy_quic_session_.streamInfo().startTime()); - EXPECT_EQ(EMPTY_STRING, envoy_quic_session_.nextProtocol()); - 
EXPECT_EQ(Http::Protocol::Http3, http_connection_.protocol()); - - time_system_.advanceTimeWait(std::chrono::milliseconds(1)); - ON_CALL(writer_, WritePacket(_, _, _, _, _)) - .WillByDefault(testing::Return(quic::WriteResult(quic::WRITE_STATUS_OK, 1))); - } - - void SetUp() override { - envoy_quic_session_.Initialize(); - setQuicConfigWithDefaultValues(envoy_quic_session_.config()); - envoy_quic_session_.OnConfigNegotiated(); - envoy_quic_session_.addConnectionCallbacks(network_connection_callbacks_); - envoy_quic_session_.setConnectionStats( - {read_total_, read_current_, write_total_, write_current_, nullptr, nullptr}); - EXPECT_EQ(&read_total_, &quic_connection_->connectionStats().read_total_); - } - - void TearDown() override { - if (quic_connection_->connected()) { - EXPECT_CALL(*quic_connection_, - SendConnectionClosePacket(quic::QUIC_NO_ERROR, _, "Closed by application")); - EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose)); - envoy_quic_session_.close(Network::ConnectionCloseType::NoFlush); - } - } - -protected: - Event::SimulatedTimeSystemHelper time_system_; - Api::ApiPtr api_; - Event::DispatcherPtr dispatcher_; - EnvoyQuicConnectionHelper connection_helper_; - EnvoyQuicAlarmFactory alarm_factory_; - testing::NiceMock writer_; - Network::Address::InstanceConstSharedPtr peer_addr_; - Network::Address::InstanceConstSharedPtr self_addr_; - TestEnvoyQuicClientConnection* quic_connection_; - quic::QuicConfig quic_config_; - std::shared_ptr crypto_config_; - TestQuicCryptoClientStreamFactory crypto_stream_factory_; - Stats::IsolatedStoreImpl store_; - QuicStatNames quic_stat_names_; - EnvoyQuicClientSession envoy_quic_session_; - Network::MockConnectionCallbacks network_connection_callbacks_; - Http::MockServerConnectionCallbacks http_connection_callbacks_; - testing::StrictMock read_total_; - testing::StrictMock read_current_; - testing::StrictMock write_total_; - testing::StrictMock write_current_; - 
Http::Http3::CodecStats stats_; - envoy::config::core::v3::Http3ProtocolOptions http3_options_; - QuicHttpClientConnectionImpl http_connection_; -}; - -INSTANTIATE_TEST_SUITE_P(EnvoyQuicClientSessionAllQuicVersionTests, - EnvoyQuicClientSessionAllQuicVersionTest, - testing::ValuesIn(quic::AllSupportedVersions())); +TEST_F(EnvoyQuicClientSessionTest, HandshakeTimesOutWithActiveStream) { + Http::MockResponseDecoder response_decoder; + Http::MockStreamCallbacks stream_callbacks; + EnvoyQuicClientStream& stream = sendGetRequest(response_decoder, stream_callbacks); + EXPECT_CALL(*quic_connection_, + SendConnectionClosePacket(quic::QUIC_HANDSHAKE_FAILED, _, "fake handshake time out")); + EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose)); + EXPECT_CALL(stream_callbacks, onResetStream(Http::StreamResetReason::ConnectionFailure, _)); + envoy_quic_session_.OnStreamError(quic::QUIC_HANDSHAKE_FAILED, "fake handshake time out"); + EXPECT_EQ(Network::Connection::State::Closed, envoy_quic_session_.state()); + EXPECT_TRUE(stream.write_side_closed() && stream.reading_stopped()); + EXPECT_EQ(1U, + TestUtility::findCounter( + store_, "http3.upstream.tx.quic_connection_close_error_code_QUIC_HANDSHAKE_FAILED") + ->value()); +} -TEST_P(EnvoyQuicClientSessionAllQuicVersionTest, ConnectionClosePopulatesQuicVersionStats) { +TEST_F(EnvoyQuicClientSessionTest, ConnectionClosePopulatesQuicVersionStats) { std::string error_details("dummy details"); quic::QuicErrorCode error(quic::QUIC_INVALID_FRAME_DATA); - quic::QuicConnectionCloseFrame frame(GetParam().transport_version, error, + quic::QuicConnectionCloseFrame frame(quic_version_[0].transport_version, error, quic::NO_IETF_QUIC_ERROR, error_details, /* transport_close_frame_type = */ 0); EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::RemoteClose)); @@ -376,32 +324,24 @@ TEST_P(EnvoyQuicClientSessionAllQuicVersionTest, ConnectionClosePopulatesQuicVer 
EXPECT_EQ(absl::StrCat(quic::QuicErrorCodeToString(error), " with details: ", error_details), envoy_quic_session_.transportFailureReason()); EXPECT_EQ(Network::Connection::State::Closed, envoy_quic_session_.state()); - std::string quic_version_stat_name; - switch (GetParam().transport_version) { - case quic::QUIC_VERSION_43: - quic_version_stat_name = "43"; - break; - case quic::QUIC_VERSION_46: - quic_version_stat_name = "46"; - break; - case quic::QUIC_VERSION_50: - quic_version_stat_name = "50"; - break; - case quic::QUIC_VERSION_51: - quic_version_stat_name = "51"; - break; - case quic::QUIC_VERSION_IETF_DRAFT_29: - quic_version_stat_name = "h3_29"; - break; - case quic::QUIC_VERSION_IETF_RFC_V1: - quic_version_stat_name = "rfc_v1"; - break; - default: - break; - } - EXPECT_EQ(1U, TestUtility::findCounter( - store_, absl::StrCat("http3.quic_version_", quic_version_stat_name)) - ->value()); + EXPECT_EQ(1U, TestUtility::findCounter(store_, "http3.quic_version_rfc_v1")->value()); +} + +TEST_F(EnvoyQuicClientSessionTest, IncomingUnidirectionalReadStream) { + quic::QuicStreamId stream_id = 1u; + quic::QuicStreamFrame stream_frame(stream_id, false, 0, "aaa"); + envoy_quic_session_.OnStreamFrame(stream_frame); + EXPECT_FALSE(quic::test::QuicSessionPeer::IsStreamCreated(&envoy_quic_session_, stream_id)); + // IETF stream 3 is server initiated uni-directional stream. 
+ stream_id = 3u; + auto payload = std::make_unique(8); + quic::QuicDataWriter payload_writer(8, payload.get()); + EXPECT_TRUE(payload_writer.WriteVarInt62(1ul)); + EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose)); + EXPECT_CALL(*quic_connection_, SendConnectionClosePacket(quic::QUIC_HTTP_RECEIVE_SERVER_PUSH, _, + "Received server push stream")); + quic::QuicStreamFrame stream_frame2(stream_id, false, 0, absl::string_view(payload.get(), 1)); + envoy_quic_session_.OnStreamFrame(stream_frame2); } } // namespace Quic diff --git a/test/common/quic/envoy_quic_client_stream_test.cc b/test/common/quic/envoy_quic_client_stream_test.cc index 5de4695bb4794..3433def5dd11f 100644 --- a/test/common/quic/envoy_quic_client_stream_test.cc +++ b/test/common/quic/envoy_quic_client_stream_test.cc @@ -1,3 +1,15 @@ +#if defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" +#pragma GCC diagnostic ignored "-Winvalid-offsetof" +#endif + +#include "quiche/quic/core/crypto/null_encrypter.h" + +#if defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + #include "source/common/quic/envoy_quic_alarm_factory.h" #include "source/common/quic/envoy_quic_client_connection.h" #include "source/common/quic/envoy_quic_client_stream.h" @@ -24,16 +36,13 @@ class MockDelegate : public PacketsToReadDelegate { MOCK_METHOD(size_t, numPacketsExpectedPerEventLoop, ()); }; -class EnvoyQuicClientStreamTest : public testing::TestWithParam { +class EnvoyQuicClientStreamTest : public testing::Test { public: EnvoyQuicClientStreamTest() : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher("test_thread")), connection_helper_(*dispatcher_), - alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() { - SetQuicReloadableFlag(quic_disable_version_draft_29, !GetParam()); - SetQuicReloadableFlag(quic_disable_version_rfcv1, !GetParam()); - return quic::CurrentSupportedVersions()[0]; - }()), + 
alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), + quic_version_(quic::CurrentSupportedHttp3Versions()[0]), peer_addr_(Network::Utility::getAddressWithPort(*Network::Utility::getIpv6LoopbackAddress(), 12345)), self_addr_(Network::Utility::getAddressWithPort(*Network::Utility::getIpv6LoopbackAddress(), @@ -46,9 +55,8 @@ class EnvoyQuicClientStreamTest : public testing::TestWithParam { std::unique_ptr(quic_connection_), *dispatcher_, quic_config_.GetInitialStreamFlowControlWindowToSend() * 2, crypto_stream_factory_), - stream_id_(quic::VersionUsesHttp3(quic_version_.transport_version) ? 4u : 5u), - stats_({ALL_HTTP3_CODEC_STATS(POOL_COUNTER_PREFIX(scope_, "http3."), - POOL_GAUGE_PREFIX(scope_, "http3."))}), + stream_id_(4u), stats_({ALL_HTTP3_CODEC_STATS(POOL_COUNTER_PREFIX(scope_, "http3."), + POOL_GAUGE_PREFIX(scope_, "http3."))}), quic_stream_(new EnvoyQuicClientStream(stream_id_, &quic_session_, quic::BIDIRECTIONAL, stats_, http3_options_)), request_headers_{{":authority", host_}, {":method", "POST"}, {":path", "/"}}, @@ -74,22 +82,16 @@ class EnvoyQuicClientStreamTest : public testing::TestWithParam { void SetUp() override { quic_session_.Initialize(); quic_connection_->setEnvoyConnection(quic_session_); + quic_connection_->SetEncrypter( + quic::ENCRYPTION_FORWARD_SECURE, + std::make_unique(quic::Perspective::IS_CLIENT)); + quic_connection_->SetDefaultEncryptionLevel(quic::ENCRYPTION_FORWARD_SECURE); + setQuicConfigWithDefaultValues(quic_session_.config()); quic_session_.OnConfigNegotiated(); quic_connection_->setUpConnectionSocket(delegate_); - response_headers_.OnHeaderBlockStart(); - response_headers_.OnHeader(":status", "200"); - response_headers_.OnHeaderBlockEnd(/*uncompressed_header_bytes=*/0, - /*compressed_header_bytes=*/0); spdy_response_headers_[":status"] = "200"; - trailers_.OnHeaderBlockStart(); - trailers_.OnHeader("key1", "value1"); - if (!quic::VersionUsesHttp3(quic_version_.transport_version)) { - // ":final-offset" is required and 
stripped off by quic. - trailers_.OnHeader(":final-offset", absl::StrCat("", response_body_.length())); - } - trailers_.OnHeaderBlockEnd(/*uncompressed_header_bytes=*/0, /*compressed_header_bytes=*/0); spdy_trailers_["key1"] = "value1"; } @@ -101,13 +103,6 @@ class EnvoyQuicClientStreamTest : public testing::TestWithParam { } } - std::string bodyToStreamPayload(const std::string& body) { - if (!quic::VersionUsesHttp3(quic_version_.transport_version)) { - return body; - } - return bodyToHttp3StreamPayload(body); - } - size_t receiveResponse(const std::string& payload, bool fin, size_t offset = 0) { EXPECT_CALL(stream_decoder_, decodeHeaders_(_, /*end_stream=*/false)) .WillOnce(Invoke([](const Http::ResponseHeaderMapPtr& headers, bool) { @@ -119,21 +114,12 @@ class EnvoyQuicClientStreamTest : public testing::TestWithParam { EXPECT_EQ(payload, buffer.toString()); EXPECT_EQ(fin, finished_reading); })); - if (quic::VersionUsesHttp3(quic_version_.transport_version)) { - std::string data = absl::StrCat(spdyHeaderToHttp3StreamPayload(spdy_response_headers_), - bodyToStreamPayload(payload)); - quic::QuicStreamFrame frame(stream_id_, fin, offset, data); - quic_stream_->OnStreamFrame(frame); - EXPECT_TRUE(quic_stream_->FinishedReadingHeaders()); - return offset + data.length(); - } - quic_stream_->OnStreamHeaderList(/*fin=*/false, response_headers_.uncompressed_header_bytes(), - response_headers_); - - quic::QuicStreamFrame frame(stream_id_, fin, offset, payload); + std::string data = absl::StrCat(spdyHeaderToHttp3StreamPayload(spdy_response_headers_), + bodyToHttp3StreamPayload(payload)); + quic::QuicStreamFrame frame(stream_id_, fin, offset, data); quic_stream_->OnStreamFrame(frame); EXPECT_TRUE(quic_stream_->FinishedReadingHeaders()); - return offset + payload.length(); + return offset + data.length(); } protected: @@ -160,39 +146,28 @@ class EnvoyQuicClientStreamTest : public testing::TestWithParam { std::string host_{"www.abc.com"}; Http::TestRequestHeaderMapImpl 
request_headers_; Http::TestRequestTrailerMapImpl request_trailers_; - quic::QuicHeaderList response_headers_; spdy::SpdyHeaderBlock spdy_response_headers_; - quic::QuicHeaderList trailers_; spdy::SpdyHeaderBlock spdy_trailers_; Buffer::OwnedImpl request_body_{"Hello world"}; std::string response_body_{"OK\n"}; }; -INSTANTIATE_TEST_SUITE_P(EnvoyQuicClientStreamTests, EnvoyQuicClientStreamTest, - testing::ValuesIn({true, false})); - -TEST_P(EnvoyQuicClientStreamTest, GetRequestAndHeaderOnlyResponse) { +TEST_F(EnvoyQuicClientStreamTest, GetRequestAndHeaderOnlyResponse) { const auto result = quic_stream_->encodeHeaders(request_headers_, /*end_stream=*/true); EXPECT_TRUE(result.ok()); - EXPECT_CALL(stream_decoder_, decodeHeaders_(_, /*end_stream=*/!quic::VersionUsesHttp3( - quic_version_.transport_version))) + EXPECT_CALL(stream_decoder_, decodeHeaders_(_, /*end_stream=*/false)) .WillOnce(Invoke([](const Http::ResponseHeaderMapPtr& headers, bool) { EXPECT_EQ("200", headers->getStatusValue()); })); - if (quic::VersionUsesHttp3(quic_version_.transport_version)) { - EXPECT_CALL(stream_decoder_, decodeData(BufferStringEqual(""), /*end_stream=*/true)); - std::string payload = spdyHeaderToHttp3StreamPayload(spdy_response_headers_); - quic::QuicStreamFrame frame(stream_id_, true, 0, payload); - quic_stream_->OnStreamFrame(frame); - } else { - quic_stream_->OnStreamHeaderList(/*fin=*/true, response_headers_.uncompressed_header_bytes(), - response_headers_); - } + EXPECT_CALL(stream_decoder_, decodeData(BufferStringEqual(""), /*end_stream=*/true)); + std::string payload = spdyHeaderToHttp3StreamPayload(spdy_response_headers_); + quic::QuicStreamFrame frame(stream_id_, true, 0, payload); + quic_stream_->OnStreamFrame(frame); EXPECT_TRUE(quic_stream_->FinishedReadingHeaders()); } -TEST_P(EnvoyQuicClientStreamTest, PostRequestAndResponse) { +TEST_F(EnvoyQuicClientStreamTest, PostRequestAndResponse) { EXPECT_EQ(absl::nullopt, quic_stream_->http1StreamEncoderOptions()); const auto 
result = quic_stream_->encodeHeaders(request_headers_, false); EXPECT_TRUE(result.ok()); @@ -207,25 +182,19 @@ TEST_P(EnvoyQuicClientStreamTest, PostRequestAndResponse) { EXPECT_EQ("value1", headers->get(key1)[0]->value().getStringView()); EXPECT_TRUE(headers->get(key2).empty()); })); - if (quic::VersionUsesHttp3(quic_version_.transport_version)) { - std::string more_response_body{"bbb"}; - EXPECT_CALL(stream_decoder_, decodeData(_, _)) - .WillOnce(Invoke([&](Buffer::Instance& buffer, bool finished_reading) { - EXPECT_EQ(more_response_body, buffer.toString()); - EXPECT_EQ(false, finished_reading); - })); - std::string payload = absl::StrCat(bodyToStreamPayload(more_response_body), - spdyHeaderToHttp3StreamPayload(spdy_trailers_)); - quic::QuicStreamFrame frame(stream_id_, true, offset, payload); - quic_stream_->OnStreamFrame(frame); - } else { - quic_stream_->OnStreamHeaderList( - /*fin=*/!quic::VersionUsesHttp3(quic_version_.transport_version), - trailers_.uncompressed_header_bytes(), trailers_); - } + std::string more_response_body{"bbb"}; + EXPECT_CALL(stream_decoder_, decodeData(_, _)) + .WillOnce(Invoke([&](Buffer::Instance& buffer, bool finished_reading) { + EXPECT_EQ(more_response_body, buffer.toString()); + EXPECT_EQ(false, finished_reading); + })); + std::string payload = absl::StrCat(bodyToHttp3StreamPayload(more_response_body), + spdyHeaderToHttp3StreamPayload(spdy_trailers_)); + quic::QuicStreamFrame frame(stream_id_, true, offset, payload); + quic_stream_->OnStreamFrame(frame); } -TEST_P(EnvoyQuicClientStreamTest, PostRequestAnd100Continue) { +TEST_F(EnvoyQuicClientStreamTest, PostRequestAnd100Continue) { const auto result = quic_stream_->encodeHeaders(request_headers_, false); EXPECT_TRUE(result.ok()); @@ -245,94 +214,33 @@ TEST_P(EnvoyQuicClientStreamTest, PostRequestAnd100Continue) { // Receive several 10x headers, only the first 100 Continue header should be // delivered. 
for (const std::string& status : {"100", "103", "100"}) { - if (quic::VersionUsesHttp3(quic_version_.transport_version)) { - spdy::SpdyHeaderBlock continue_header; - continue_header[":status"] = status; - continue_header["i"] = absl::StrCat("", i++); - std::string data = spdyHeaderToHttp3StreamPayload(continue_header); - quic::QuicStreamFrame frame(stream_id_, false, offset, data); - quic_stream_->OnStreamFrame(frame); - offset += data.length(); - } else { - quic::QuicHeaderList continue_header; - continue_header.OnHeaderBlockStart(); - continue_header.OnHeader(":status", status); - continue_header.OnHeader("i", absl::StrCat("", i++)); - continue_header.OnHeaderBlockEnd(0, 0); - quic_stream_->OnStreamHeaderList(/*fin=*/false, continue_header.uncompressed_header_bytes(), - continue_header); - } + spdy::SpdyHeaderBlock continue_header; + continue_header[":status"] = status; + continue_header["i"] = absl::StrCat("", i++); + std::string data = spdyHeaderToHttp3StreamPayload(continue_header); + quic::QuicStreamFrame frame(stream_id_, false, offset, data); + quic_stream_->OnStreamFrame(frame); + offset += data.length(); } receiveResponse(response_body_, true, offset); } -TEST_P(EnvoyQuicClientStreamTest, ResetUpon101SwitchProtocol) { +TEST_F(EnvoyQuicClientStreamTest, ResetUpon101SwitchProtocol) { const auto result = quic_stream_->encodeHeaders(request_headers_, false); EXPECT_TRUE(result.ok()); EXPECT_CALL(stream_callbacks_, onResetStream(Http::StreamResetReason::ProtocolError, _)); // Receive several 10x headers, only the first 100 Continue header should be // delivered. 
- if (quic::VersionUsesHttp3(quic_version_.transport_version)) { - spdy::SpdyHeaderBlock continue_header; - continue_header[":status"] = "101"; - std::string data = spdyHeaderToHttp3StreamPayload(continue_header); - quic::QuicStreamFrame frame(stream_id_, false, 0u, data); - quic_stream_->OnStreamFrame(frame); - } else { - quic::QuicHeaderList continue_header; - continue_header.OnHeaderBlockStart(); - continue_header.OnHeader(":status", "101"); - continue_header.OnHeaderBlockEnd(0, 0); - quic_stream_->OnStreamHeaderList(/*fin=*/false, continue_header.uncompressed_header_bytes(), - continue_header); - } -} - -TEST_P(EnvoyQuicClientStreamTest, OutOfOrderTrailers) { - if (quic::VersionUsesHttp3(quic_version_.transport_version)) { - EXPECT_CALL(stream_callbacks_, onResetStream(_, _)); - return; - } - const auto result = quic_stream_->encodeHeaders(request_headers_, true); - EXPECT_TRUE(result.ok()); - EXPECT_CALL(stream_decoder_, decodeHeaders_(_, /*end_stream=*/false)) - .WillOnce(Invoke([](const Http::ResponseHeaderMapPtr& headers, bool) { - EXPECT_EQ("200", headers->getStatusValue()); - })); - quic_stream_->OnStreamHeaderList(/*fin=*/false, response_headers_.uncompressed_header_bytes(), - response_headers_); - EXPECT_TRUE(quic_stream_->FinishedReadingHeaders()); - - // Trailer should be delivered to HCM later after body arrives. - quic_stream_->OnStreamHeaderList(/*fin=*/true, trailers_.uncompressed_header_bytes(), trailers_); - - quic::QuicStreamFrame frame(stream_id_, false, 0, response_body_); - EXPECT_CALL(stream_decoder_, decodeData(_, _)) - .Times(testing::AtMost(2)) - .WillOnce(Invoke([this](Buffer::Instance& buffer, bool finished_reading) { - EXPECT_EQ(response_body_, buffer.toString()); - EXPECT_FALSE(finished_reading); - })) - // Depends on QUIC version, there may be an empty STREAM_FRAME with FIN. But - // since there is trailers, finished_reading should always be false. 
- .WillOnce(Invoke([](Buffer::Instance& buffer, bool finished_reading) { - EXPECT_FALSE(finished_reading); - EXPECT_EQ(0, buffer.length()); - })); - - EXPECT_CALL(stream_decoder_, decodeTrailers_(_)) - .WillOnce(Invoke([](const Http::ResponseTrailerMapPtr& headers) { - Http::LowerCaseString key1("key1"); - Http::LowerCaseString key2(":final-offset"); - EXPECT_EQ("value1", headers->get(key1)[0]->value().getStringView()); - EXPECT_TRUE(headers->get(key2).empty()); - })); + spdy::SpdyHeaderBlock continue_header; + continue_header[":status"] = "101"; + std::string data = spdyHeaderToHttp3StreamPayload(continue_header); + quic::QuicStreamFrame frame(stream_id_, false, 0u, data); quic_stream_->OnStreamFrame(frame); } -TEST_P(EnvoyQuicClientStreamTest, WatermarkSendBuffer) { +TEST_F(EnvoyQuicClientStreamTest, WatermarkSendBuffer) { // Bump connection flow control window large enough not to cause connection // level flow control blocked. quic::QuicWindowUpdateFrame window_update( @@ -388,14 +296,8 @@ TEST_P(EnvoyQuicClientStreamTest, WatermarkSendBuffer) { } // Tests that headers and trailers buffered in send buffer contribute towards buffer watermark -// limits. Only IETF QUIC writes them on data stream, gQUIC writes them on dedicated headers stream -// and only contributes to connection watermark buffer. -TEST_P(EnvoyQuicClientStreamTest, HeadersContributeToWatermarkIquic) { - if (!quic::VersionUsesHttp3(quic_version_.transport_version)) { - EXPECT_CALL(stream_callbacks_, onResetStream(_, _)); - return; - } - +// limits. 
+TEST_F(EnvoyQuicClientStreamTest, HeadersContributeToWatermark) { // Bump connection flow control window large enough not to cause connection level flow control // blocked quic::QuicWindowUpdateFrame window_update( @@ -464,48 +366,39 @@ TEST_P(EnvoyQuicClientStreamTest, HeadersContributeToWatermarkIquic) { EXPECT_CALL(stream_callbacks_, onResetStream(_, _)); } -TEST_P(EnvoyQuicClientStreamTest, ResetStream) { - EXPECT_CALL(stream_callbacks_, onResetStream(Http::StreamResetReason::LocalReset, _)); - quic_stream_->resetStream(Http::StreamResetReason::LocalReset); +TEST_F(EnvoyQuicClientStreamTest, ResetStream) { + EXPECT_CALL(stream_callbacks_, onResetStream(Http::StreamResetReason::ConnectionFailure, _)); + quic_stream_->resetStream(Http::StreamResetReason::ConnectionFailure); EXPECT_TRUE(quic_stream_->rst_sent()); } -TEST_P(EnvoyQuicClientStreamTest, ReceiveResetStream) { +TEST_F(EnvoyQuicClientStreamTest, ReceiveResetStream) { EXPECT_CALL(stream_callbacks_, onResetStream(Http::StreamResetReason::RemoteReset, _)); quic_stream_->OnStreamReset(quic::QuicRstStreamFrame( quic::kInvalidControlFrameId, quic_stream_->id(), quic::QUIC_STREAM_NO_ERROR, 0)); EXPECT_TRUE(quic_stream_->rst_received()); } -TEST_P(EnvoyQuicClientStreamTest, CloseConnectionDuringDecodingHeader) { +TEST_F(EnvoyQuicClientStreamTest, CloseConnectionDuringDecodingHeader) { const auto result = quic_stream_->encodeHeaders(request_headers_, false); EXPECT_TRUE(result.ok()); quic_stream_->encodeData(request_body_, true); - EXPECT_CALL(stream_decoder_, decodeHeaders_(_, /*end_stream=*/!quic::VersionUsesHttp3( - quic_version_.transport_version))) + EXPECT_CALL(stream_decoder_, decodeHeaders_(_, /*end_stream=*/false)) .WillOnce(Invoke([this](const Http::ResponseHeaderMapPtr&, bool) { quic_connection_->CloseConnection( quic::QUIC_NO_ERROR, "Closed in decodeHeaders", quic::ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET); })); - if (quic::VersionUsesHttp3(quic_version_.transport_version)) { - // 
onResetStream() callback should be triggered because end_stream is - // not decoded with header. - EXPECT_CALL(stream_callbacks_, - onResetStream(Http::StreamResetReason::ConnectionTermination, _)); - std::string data = spdyHeaderToHttp3StreamPayload(spdy_response_headers_); - quic::QuicStreamFrame frame(stream_id_, true, 0, data); - quic_stream_->OnStreamFrame(frame); - } else { - // onResetStream() callback shouldn't be triggered because end_stream is - // already decoded. - quic_stream_->OnStreamHeaderList(/*fin=*/true, response_headers_.uncompressed_header_bytes(), - response_headers_); - } + // onResetStream() callback should be triggered because end_stream is + // not decoded with header. + EXPECT_CALL(stream_callbacks_, onResetStream(Http::StreamResetReason::ConnectionTermination, _)); + std::string data = spdyHeaderToHttp3StreamPayload(spdy_response_headers_); + quic::QuicStreamFrame frame(stream_id_, true, 0, data); + quic_stream_->OnStreamFrame(frame); } -TEST_P(EnvoyQuicClientStreamTest, CloseConnectionDuringDecodingDataWithEndStream) { +TEST_F(EnvoyQuicClientStreamTest, CloseConnectionDuringDecodingDataWithEndStream) { const auto result = quic_stream_->encodeHeaders(request_headers_, false); EXPECT_TRUE(result.ok()); quic_stream_->encodeData(request_body_, true); @@ -518,21 +411,13 @@ TEST_P(EnvoyQuicClientStreamTest, CloseConnectionDuringDecodingDataWithEndStream quic::QUIC_NO_ERROR, "Closed in decodeDdata", quic::ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET); })); - if (quic::VersionUsesHttp3(quic_version_.transport_version)) { - std::string data = absl::StrCat(spdyHeaderToHttp3StreamPayload(spdy_response_headers_), - bodyToStreamPayload(response_body_)); - quic::QuicStreamFrame frame(stream_id_, true, 0, data); - quic_stream_->OnStreamFrame(frame); - } else { - quic_stream_->OnStreamHeaderList(/*fin=*/false, response_headers_.uncompressed_header_bytes(), - response_headers_); - - quic::QuicStreamFrame frame(stream_id_, true, 0, 
response_body_); - quic_stream_->OnStreamFrame(frame); - } + std::string data = absl::StrCat(spdyHeaderToHttp3StreamPayload(spdy_response_headers_), + bodyToHttp3StreamPayload(response_body_)); + quic::QuicStreamFrame frame(stream_id_, true, 0, data); + quic_stream_->OnStreamFrame(frame); } -TEST_P(EnvoyQuicClientStreamTest, CloseConnectionDuringDecodingDataWithTrailer) { +TEST_F(EnvoyQuicClientStreamTest, CloseConnectionDuringDecodingDataWithTrailer) { const auto result = quic_stream_->encodeHeaders(request_headers_, false); EXPECT_TRUE(result.ok()); quic_stream_->encodeData(request_body_, true); @@ -547,25 +432,14 @@ TEST_P(EnvoyQuicClientStreamTest, CloseConnectionDuringDecodingDataWithTrailer) EXPECT_TRUE(quic_stream_->read_side_closed()); })); EXPECT_CALL(stream_callbacks_, onResetStream(Http::StreamResetReason::ConnectionTermination, _)); - if (quic::VersionUsesHttp3(quic_version_.transport_version)) { - std::string data = absl::StrCat(spdyHeaderToHttp3StreamPayload(spdy_response_headers_), - bodyToStreamPayload(response_body_), - spdyHeaderToHttp3StreamPayload(spdy_trailers_)); - quic::QuicStreamFrame frame(stream_id_, true, 0, data); - quic_stream_->OnStreamFrame(frame); - } else { - quic_stream_->OnStreamHeaderList(/*fin=*/false, response_headers_.uncompressed_header_bytes(), - response_headers_); - - quic::QuicStreamFrame frame(stream_id_, /*fin=*/false, 0, response_body_); - quic_stream_->OnStreamFrame(frame); - quic_stream_->OnStreamHeaderList( - /*fin=*/!quic::VersionUsesHttp3(quic_version_.transport_version), - trailers_.uncompressed_header_bytes(), trailers_); - } + std::string data = absl::StrCat(spdyHeaderToHttp3StreamPayload(spdy_response_headers_), + bodyToHttp3StreamPayload(response_body_), + spdyHeaderToHttp3StreamPayload(spdy_trailers_)); + quic::QuicStreamFrame frame(stream_id_, true, 0, data); + quic_stream_->OnStreamFrame(frame); } -TEST_P(EnvoyQuicClientStreamTest, CloseConnectionDuringDecodingTrailer) { +TEST_F(EnvoyQuicClientStreamTest, 
CloseConnectionDuringDecodingTrailer) { const auto result = quic_stream_->encodeHeaders(request_headers_, true); EXPECT_TRUE(result.ok()); @@ -577,18 +451,12 @@ TEST_P(EnvoyQuicClientStreamTest, CloseConnectionDuringDecodingTrailer) { quic::QUIC_NO_ERROR, "Closed in decodeTrailers", quic::ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET); })); - if (quic::VersionUsesHttp3(quic_version_.transport_version)) { - std::string payload = spdyHeaderToHttp3StreamPayload(spdy_trailers_); - quic::QuicStreamFrame frame(stream_id_, true, offset, payload); - quic_stream_->OnStreamFrame(frame); - } else { - quic_stream_->OnStreamHeaderList( - /*fin=*/!quic::VersionUsesHttp3(quic_version_.transport_version), - trailers_.uncompressed_header_bytes(), trailers_); - } + std::string payload = spdyHeaderToHttp3StreamPayload(spdy_trailers_); + quic::QuicStreamFrame frame(stream_id_, true, offset, payload); + quic_stream_->OnStreamFrame(frame); } -TEST_P(EnvoyQuicClientStreamTest, MetadataNotSupported) { +TEST_F(EnvoyQuicClientStreamTest, MetadataNotSupported) { Http::MetadataMap metadata_map = {{"key", "value"}}; Http::MetadataMapPtr metadata_map_ptr = std::make_unique(metadata_map); Http::MetadataMapVector metadata_map_vector; @@ -599,25 +467,20 @@ TEST_P(EnvoyQuicClientStreamTest, MetadataNotSupported) { } // Tests that posted stream block callback won't cause use-after-free crash. 
-TEST_P(EnvoyQuicClientStreamTest, ReadDisabledBeforeClose) { +TEST_F(EnvoyQuicClientStreamTest, ReadDisabledBeforeClose) { const auto result = quic_stream_->encodeHeaders(request_headers_, /*end_stream=*/true); EXPECT_TRUE(result.ok()); - EXPECT_CALL(stream_decoder_, decodeHeaders_(_, /*end_stream=*/!quic::VersionUsesHttp3( - quic_version_.transport_version))) + EXPECT_CALL(stream_decoder_, decodeHeaders_(_, /*end_stream=*/false)) .WillOnce(Invoke([this](const Http::ResponseHeaderMapPtr& headers, bool) { EXPECT_EQ("200", headers->getStatusValue()); quic_stream_->readDisable(true); })); - if (quic_version_.UsesHttp3()) { - EXPECT_CALL(stream_decoder_, decodeData(BufferStringEqual(""), /*end_stream=*/true)); - std::string payload = spdyHeaderToHttp3StreamPayload(spdy_response_headers_); - quic::QuicStreamFrame frame(stream_id_, true, 0, payload); - quic_stream_->OnStreamFrame(frame); - } else { - quic_stream_->OnStreamHeaderList(/*fin=*/true, response_headers_.uncompressed_header_bytes(), - response_headers_); - } + EXPECT_CALL(stream_decoder_, decodeData(BufferStringEqual(""), /*end_stream=*/true)); + std::string payload = spdyHeaderToHttp3StreamPayload(spdy_response_headers_); + quic::QuicStreamFrame frame(stream_id_, true, 0, payload); + quic_stream_->OnStreamFrame(frame); + // Reset to close the stream. EXPECT_CALL(stream_callbacks_, onResetStream(Http::StreamResetReason::LocalReset, _)); quic_stream_->resetStream(Http::StreamResetReason::LocalReset); @@ -626,5 +489,25 @@ TEST_P(EnvoyQuicClientStreamTest, ReadDisabledBeforeClose) { dispatcher_->run(Event::Dispatcher::RunType::NonBlock); } +TEST_F(EnvoyQuicClientStreamTest, MaxIncomingHeadersCount) { + quic_session_.setMaxIncomingHeadersCount(100); + const auto result = quic_stream_->encodeHeaders(request_headers_, false); + EXPECT_TRUE(result.ok()); + quic_stream_->encodeData(request_body_, true); + + // Receive more response headers than allowed. 
Such response headers shouldn't be delivered to + // stream decoder. + EXPECT_CALL(stream_decoder_, decodeHeaders_(_, _)).Times(0u); + EXPECT_CALL(stream_callbacks_, onResetStream(Http::StreamResetReason::LocalReset, _)); + for (size_t i = 0; i < 101; ++i) { + spdy_response_headers_[absl::StrCat("key", i)] = absl::StrCat("value", i); + } + std::string data = absl::StrCat(spdyHeaderToHttp3StreamPayload(spdy_response_headers_), + bodyToHttp3StreamPayload(response_body_), + spdyHeaderToHttp3StreamPayload(spdy_trailers_)); + quic::QuicStreamFrame frame(stream_id_, true, 0, data); + quic_stream_->OnStreamFrame(frame); +} + } // namespace Quic } // namespace Envoy diff --git a/test/common/quic/envoy_quic_dispatcher_test.cc b/test/common/quic/envoy_quic_dispatcher_test.cc index 8260ef7b6e833..06119487b876b 100644 --- a/test/common/quic/envoy_quic_dispatcher_test.cc +++ b/test/common/quic/envoy_quic_dispatcher_test.cc @@ -48,11 +48,11 @@ namespace { const size_t kNumSessionsToCreatePerLoopForTests = 16; } -class EnvoyQuicDispatcherTest : public QuicMultiVersionTest, +class EnvoyQuicDispatcherTest : public testing::TestWithParam, protected Logger::Loggable { public: EnvoyQuicDispatcherTest() - : version_(GetParam().first), api_(Api::createApiForTest(time_system_)), + : version_(GetParam()), api_(Api::createApiForTest(time_system_)), dispatcher_(api_->allocateDispatcher("test_thread")), listen_socket_(std::make_unique>>( @@ -61,15 +61,7 @@ class EnvoyQuicDispatcherTest : public QuicMultiVersionTest, crypto_config_(quic::QuicCryptoServerConfig::TESTING, quic::QuicRandom::GetInstance(), std::unique_ptr(proof_source_), quic::KeyExchangeSource::Default()), - version_manager_([]() { - if (GetParam().second == QuicVersionType::GquicQuicCrypto) { - return quic::CurrentSupportedVersionsWithQuicCrypto(); - } - bool use_http3 = GetParam().second == QuicVersionType::Iquic; - SetQuicReloadableFlag(quic_disable_version_draft_29, !use_http3); - 
SetQuicReloadableFlag(quic_disable_version_rfcv1, !use_http3); - return quic::CurrentSupportedVersions(); - }()), + version_manager_(quic::CurrentSupportedHttp3Versions()), quic_version_(version_manager_.GetSupportedVersions()[0]), listener_stats_({ALL_LISTENER_STATS(POOL_COUNTER(listener_config_.listenerScope()), POOL_GAUGE(listener_config_.listenerScope()), @@ -114,10 +106,8 @@ class EnvoyQuicDispatcherTest : public QuicMultiVersionTest, void processValidChloPacket(const quic::QuicSocketAddress& peer_addr) { // Create a Quic Crypto or TLS1.3 CHLO packet. EnvoyQuicClock clock(*dispatcher_); - Buffer::OwnedImpl payload = generateChloPacketToSend( - quic_version_, quic_config_, crypto_config_, connection_id_, clock, - envoyIpAddressToQuicSocketAddress(listen_socket_->addressProvider().localAddress()->ip()), - peer_addr, "test.example.org"); + Buffer::OwnedImpl payload = + generateChloPacketToSend(quic_version_, quic_config_, connection_id_); Buffer::RawSliceVector slice = payload.getRawSlices(); ASSERT(slice.size() == 1); auto encrypted_packet = std::make_unique( @@ -127,7 +117,8 @@ class EnvoyQuicDispatcherTest : public QuicMultiVersionTest, quic::test::ConstructReceivedPacket(*encrypted_packet, clock.Now())); envoy_quic_dispatcher_.ProcessPacket( - envoyIpAddressToQuicSocketAddress(listen_socket_->addressProvider().localAddress()->ip()), + envoyIpAddressToQuicSocketAddress( + listen_socket_->connectionInfoProvider().localAddress()->ip()), peer_addr, *received_packet); } @@ -171,10 +162,10 @@ class EnvoyQuicDispatcherTest : public QuicMultiVersionTest, auto envoy_connection = static_cast(session); EXPECT_EQ("test.example.org", envoy_connection->requestedServerName()); EXPECT_EQ(peer_addr, envoyIpAddressToQuicSocketAddress( - envoy_connection->addressProvider().remoteAddress()->ip())); - ASSERT(envoy_connection->addressProvider().localAddress() != nullptr); - EXPECT_EQ(*listen_socket_->addressProvider().localAddress(), - 
*envoy_connection->addressProvider().localAddress()); + envoy_connection->connectionInfoProvider().remoteAddress()->ip())); + ASSERT(envoy_connection->connectionInfoProvider().localAddress() != nullptr); + EXPECT_EQ(*listen_socket_->connectionInfoProvider().localAddress(), + *envoy_connection->connectionInfoProvider().localAddress()); EXPECT_EQ(64 * 1024, envoy_connection->max_inbound_header_list_size()); } @@ -198,17 +189,7 @@ class EnvoyQuicDispatcherTest : public QuicMultiVersionTest, EXPECT_CALL(listener_config_, filterChainManager()).WillOnce(ReturnRef(filter_chain_manager)); EXPECT_CALL(filter_chain_manager, findFilterChain(_)) .WillOnce(Invoke([this](const Network::ConnectionSocket& socket) { - switch (GetParam().second) { - case QuicVersionType::GquicQuicCrypto: - EXPECT_EQ("", socket.requestedApplicationProtocols()[0]); - break; - case QuicVersionType::GquicTls: - EXPECT_EQ("h3-T051", socket.requestedApplicationProtocols()[0]); - break; - case QuicVersionType::Iquic: - EXPECT_EQ("h3", socket.requestedApplicationProtocols()[0]); - break; - } + EXPECT_EQ("h3", socket.requestedApplicationProtocols()[0]); EXPECT_EQ("test.example.org", socket.requestedServerName()); return &proof_source_->filterChain(); })); @@ -230,10 +211,6 @@ class EnvoyQuicDispatcherTest : public QuicMultiVersionTest, EXPECT_CALL(*read_filter, onNewConnection()) // Stop iteration to avoid calling getRead/WriteBuffer(). .WillOnce(Return(Network::FilterStatus::StopIteration)); - if (!quicVersionUsesTls()) { - // The test utility can't generate 0-RTT packet for Quic TLS handshake yet. 
- EXPECT_CALL(network_connection_callbacks, onEvent(Network::ConnectionEvent::Connected)); - } processValidChloPacketAndCheckStatus(should_buffer); EXPECT_CALL(network_connection_callbacks, onEvent(Network::ConnectionEvent::LocalClose)); @@ -241,8 +218,6 @@ class EnvoyQuicDispatcherTest : public QuicMultiVersionTest, envoy_quic_dispatcher_.Shutdown(); } - bool quicVersionUsesTls() { return quic_version_.UsesTls(); } - protected: Network::Address::IpVersion version_; Event::SimulatedTimeSystemHelper time_system_; @@ -266,7 +241,8 @@ class EnvoyQuicDispatcherTest : public QuicMultiVersionTest, }; INSTANTIATE_TEST_SUITE_P(EnvoyQuicDispatcherTests, EnvoyQuicDispatcherTest, - testing::ValuesIn(generateTestParam()), testParamsToString); + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); TEST_P(EnvoyQuicDispatcherTest, CreateNewConnectionUponCHLO) { processValidChloPacketAndInitializeFilters(false); @@ -310,10 +286,6 @@ TEST_P(EnvoyQuicDispatcherTest, CloseConnectionDuringFilterInstallation) { // Stop iteration to avoid calling getRead/WriteBuffer(). .WillOnce(Return(Network::FilterStatus::StopIteration)); - if (!quicVersionUsesTls()) { - EXPECT_CALL(network_connection_callbacks, onEvent(Network::ConnectionEvent::Connected)); - } - EXPECT_CALL(network_connection_callbacks, onEvent(Network::ConnectionEvent::LocalClose)); quic::QuicSocketAddress peer_addr(version_ == Network::Address::IpVersion::v4 ? 
quic::QuicIpAddress::Loopback4() diff --git a/test/common/quic/envoy_quic_proof_source_test.cc b/test/common/quic/envoy_quic_proof_source_test.cc index efcd16337872f..75230db0f504f 100644 --- a/test/common/quic/envoy_quic_proof_source_test.cc +++ b/test/common/quic/envoy_quic_proof_source_test.cc @@ -12,6 +12,7 @@ #include "gmock/gmock.h" #include "gtest/gtest.h" +#include "quiche/quic/core/crypto/certificate_view.h" #include "quiche/quic/test_tools/test_certificates.h" using testing::Invoke; @@ -22,13 +23,9 @@ namespace Envoy { namespace Quic { -class TestGetProofCallback : public quic::ProofSource::Callback { +class SignatureVerifier { public: - TestGetProofCallback(bool& called, bool should_succeed, const std::string& server_config, - quic::QuicTransportVersion& version, absl::string_view chlo_hash, - Network::FilterChain& filter_chain) - : called_(called), should_succeed_(should_succeed), server_config_(server_config), - version_(version), chlo_hash_(chlo_hash), expected_filter_chain_(filter_chain) { + SignatureVerifier() { ON_CALL(client_context_config_, cipherSuites) .WillByDefault(ReturnRef( Extensions::TransportSockets::Tls::ClientContextConfigImpl::DEFAULT_CIPHER_SUITES)); @@ -75,34 +72,29 @@ class TestGetProofCallback : public quic::ProofSource::Callback { verifier_ = std::make_unique(std::move(context)); } - // quic::ProofSource::Callback - void Run(bool ok, const quic::QuicReferenceCountedPointer& chain, - const quic::QuicCryptoProof& proof, - std::unique_ptr details) override { - called_ = true; - if (!should_succeed_) { - EXPECT_FALSE(ok); - return; - }; - EXPECT_TRUE(ok); - EXPECT_EQ(2, chain->certs.size()); + void + verifyCertsAndSignature(const quic::QuicReferenceCountedPointer& chain, + const std::string& payload, const std::string& signature) { + const std::string& leaf = chain->certs[0]; + std::unique_ptr cert_view = + quic::CertificateView::ParseSingleCertificate(leaf); + ASSERT_NE(cert_view, nullptr); + std::string error_details; + int 
sign_alg = deduceSignatureAlgorithmFromPublicKey(cert_view->public_key(), &error_details); + EXPECT_NE(sign_alg, 0); + EXPECT_TRUE(cert_view->VerifySignature(payload, signature, sign_alg)); + std::string error; EXPECT_EQ(quic::QUIC_SUCCESS, - verifier_->VerifyProof("www.example.org", 54321, server_config_, version_, chlo_hash_, - chain->certs, proof.leaf_cert_scts, proof.signature, nullptr, - &error, nullptr, nullptr)) + verifier_->VerifyCertChain("www.example.org", 54321, chain->certs, + /*ocsp_response=*/"", /*cert_sct=*/"Fake SCT", + /*context=*/nullptr, &error, + /*details=*/nullptr, /*out_alert=*/nullptr, + /*callback=*/nullptr)) << error; - EXPECT_EQ(&expected_filter_chain_, - &static_cast(details.get())->filterChain()); } private: - bool& called_; - bool should_succeed_; - const std::string& server_config_; - const quic::QuicTransportVersion& version_; - absl::string_view chlo_hash_; - Network::FilterChain& expected_filter_chain_; NiceMock store_; Event::GlobalTimeSystem time_system_; NiceMock client_context_config_; @@ -112,18 +104,29 @@ class TestGetProofCallback : public quic::ProofSource::Callback { class TestSignatureCallback : public quic::ProofSource::SignatureCallback { public: - TestSignatureCallback(bool expect_success) : expect_success_(expect_success) {} - ~TestSignatureCallback() override { EXPECT_TRUE(run_called_); } + TestSignatureCallback(bool expect_success, Network::FilterChain& filter_chain, + std::string& signature) + : expect_success_(expect_success), signature_(signature), + expected_filter_chain_(filter_chain) {} + ~TestSignatureCallback() override { EXPECT_TRUE(called_); } // quic::ProofSource::SignatureCallback - void Run(bool ok, std::string, std::unique_ptr) override { + void Run(bool ok, std::string signature, + std::unique_ptr details) override { + called_ = true; EXPECT_EQ(expect_success_, ok); - run_called_ = true; + if (ok) { + signature_ = signature; + EXPECT_EQ(&expected_filter_chain_, + 
&static_cast(details.get())->filterChain()); + } } private: bool expect_success_; - bool run_called_{false}; + bool called_; + std::string& signature_; + Network::FilterChain& expected_filter_chain_; }; class EnvoyQuicProofSourceTest : public ::testing::Test { @@ -148,11 +151,11 @@ class EnvoyQuicProofSourceTest : public ::testing::Test { EXPECT_CALL(filter_chain_manager_, findFilterChain(_)) .WillRepeatedly(Invoke([&](const Network::ConnectionSocket& connection_socket) { EXPECT_EQ(*quicAddressToEnvoyAddressInstance(server_address_), - *connection_socket.addressProvider().localAddress()); + *connection_socket.connectionInfoProvider().localAddress()); EXPECT_EQ(*quicAddressToEnvoyAddressInstance(client_address_), - *connection_socket.addressProvider().remoteAddress()); + *connection_socket.connectionInfoProvider().remoteAddress()); EXPECT_EQ("quic", connection_socket.detectedTransportProtocol()); - EXPECT_EQ("h3-29", connection_socket.requestedApplicationProtocols()[0]); + EXPECT_EQ("h3", connection_socket.requestedApplicationProtocols()[0]); return &filter_chain_; })); EXPECT_CALL(filter_chain_, transportSocketFactory()) @@ -168,15 +171,6 @@ class EnvoyQuicProofSourceTest : public ::testing::Test { } } - void testGetProof(bool expect_success) { - bool called = false; - auto callback = std::make_unique(called, expect_success, server_config_, - version_, chlo_hash_, filter_chain_); - proof_source_.GetProof(server_address_, client_address_, hostname_, server_config_, version_, - chlo_hash_, std::move(callback)); - EXPECT_TRUE(called); - } - protected: std::string hostname_{"www.fake.com"}; quic::QuicSocketAddress server_address_; @@ -197,17 +191,32 @@ class EnvoyQuicProofSourceTest : public ::testing::Test { EnvoyQuicProofSource proof_source_; }; -TEST_F(EnvoyQuicProofSourceTest, TestGetProof) { +TEST_F(EnvoyQuicProofSourceTest, TestGetCerChainAndSignatureAndVerify) { expectCertChainAndPrivateKey(expected_certs_, true); - testGetProof(true); + 
quic::QuicReferenceCountedPointer chain = + proof_source_.GetCertChain(server_address_, client_address_, hostname_); + EXPECT_EQ(2, chain->certs.size()); + + std::string error_details; + bssl::UniquePtr cert = parseDERCertificate(chain->certs[0], &error_details); + EXPECT_NE(cert, nullptr); + bssl::UniquePtr pub_key(X509_get_pubkey(cert.get())); + int sign_alg = deduceSignatureAlgorithmFromPublicKey(pub_key.get(), &error_details); + EXPECT_EQ(sign_alg, SSL_SIGN_RSA_PSS_RSAE_SHA256); + std::string signature; + proof_source_.ComputeTlsSignature( + server_address_, client_address_, hostname_, SSL_SIGN_RSA_PSS_RSAE_SHA256, "payload", + std::make_unique(true, filter_chain_, signature)); + SignatureVerifier verifier; + verifier.verifyCertsAndSignature(chain, "payload", signature); } -TEST_F(EnvoyQuicProofSourceTest, GetProofFailBadConfig) { +TEST_F(EnvoyQuicProofSourceTest, GetCertChainFailBadConfig) { // No filter chain. EXPECT_CALL(listen_socket_, ioHandle()).Times(3); EXPECT_CALL(filter_chain_manager_, findFilterChain(_)) .WillOnce(Invoke([&](const Network::ConnectionSocket&) { return nullptr; })); - testGetProof(false); + EXPECT_EQ(nullptr, proof_source_.GetCertChain(server_address_, client_address_, hostname_)); // Cert not ready. EXPECT_CALL(filter_chain_manager_, findFilterChain(_)) @@ -215,54 +224,36 @@ TEST_F(EnvoyQuicProofSourceTest, GetProofFailBadConfig) { EXPECT_CALL(filter_chain_, transportSocketFactory()) .WillOnce(ReturnRef(*transport_socket_factory_)); EXPECT_CALL(*mock_context_config_, isReady()).WillOnce(Return(false)); - testGetProof(false); + EXPECT_EQ(nullptr, proof_source_.GetCertChain(server_address_, client_address_, hostname_)); // No certs in config. 
- EXPECT_CALL(filter_chain_manager_, findFilterChain(_)) - .WillOnce(Invoke([&](const Network::ConnectionSocket&) { return &filter_chain_; })); - EXPECT_CALL(filter_chain_, transportSocketFactory()) - .WillOnce(ReturnRef(*transport_socket_factory_)); - EXPECT_CALL(*mock_context_config_, isReady()).WillOnce(Return(true)); - std::vector> tls_cert_configs{}; - EXPECT_CALL(*mock_context_config_, tlsCertificates()).WillOnce(Return(tls_cert_configs)); - testGetProof(false); -} - -TEST_F(EnvoyQuicProofSourceTest, GetProofFailNoCertConfig) { - bool called = false; - auto callback = std::make_unique(called, false, server_config_, version_, - chlo_hash_, filter_chain_); - EXPECT_CALL(listen_socket_, ioHandle()).Times(1u); EXPECT_CALL(filter_chain_manager_, findFilterChain(_)) .WillRepeatedly(Invoke([&](const Network::ConnectionSocket& connection_socket) { EXPECT_EQ(*quicAddressToEnvoyAddressInstance(server_address_), - *connection_socket.addressProvider().localAddress()); + *connection_socket.connectionInfoProvider().localAddress()); EXPECT_EQ(*quicAddressToEnvoyAddressInstance(client_address_), - *connection_socket.addressProvider().remoteAddress()); + *connection_socket.connectionInfoProvider().remoteAddress()); EXPECT_EQ("quic", connection_socket.detectedTransportProtocol()); - EXPECT_EQ("h3-29", connection_socket.requestedApplicationProtocols()[0]); + EXPECT_EQ("h3", connection_socket.requestedApplicationProtocols()[0]); return &filter_chain_; })); EXPECT_CALL(filter_chain_, transportSocketFactory()) - .WillRepeatedly(ReturnRef(*transport_socket_factory_)); + .WillOnce(ReturnRef(*transport_socket_factory_)); EXPECT_CALL(*mock_context_config_, isReady()).WillOnce(Return(true)); - EXPECT_CALL(*mock_context_config_, tlsCertificates()) - .WillRepeatedly( - Return(std::vector>{})); - proof_source_.GetProof(server_address_, client_address_, hostname_, server_config_, version_, - chlo_hash_, std::move(callback)); - EXPECT_TRUE(called); + std::vector> tls_cert_configs{}; + 
EXPECT_CALL(*mock_context_config_, tlsCertificates()).WillOnce(Return(tls_cert_configs)); + EXPECT_EQ(nullptr, proof_source_.GetCertChain(server_address_, client_address_, hostname_)); } -TEST_F(EnvoyQuicProofSourceTest, GetProofFailInvalidCert) { +TEST_F(EnvoyQuicProofSourceTest, GetCertChainFailInvalidCert) { std::string invalid_cert{R"(-----BEGIN CERTIFICATE----- invalid certificate -----END CERTIFICATE-----)"}; expectCertChainAndPrivateKey(invalid_cert, false); - testGetProof(false); + EXPECT_EQ(nullptr, proof_source_.GetCertChain(server_address_, client_address_, hostname_)); } -TEST_F(EnvoyQuicProofSourceTest, GetProofFailInvalidPublicKeyInCert) { +TEST_F(EnvoyQuicProofSourceTest, GetCertChainFailInvalidPublicKeyInCert) { // This is a valid cert with RSA public key. But we don't support RSA key with // length < 1024. std::string cert_with_rsa_1024{R"(-----BEGIN CERTIFICATE----- @@ -284,7 +275,18 @@ x96rVeUbRJ/qU4//nNM/XQa9vIAIcTZ0jFhmb0c3R4rmoqqC3vkSDwtaE5yuS5T4 GUy+n0vQNB0cXGzgcGI= -----END CERTIFICATE-----)"}; expectCertChainAndPrivateKey(cert_with_rsa_1024, false); - testGetProof(false); + EXPECT_EQ(nullptr, proof_source_.GetCertChain(server_address_, client_address_, hostname_)); +} + +TEST_F(EnvoyQuicProofSourceTest, ComputeSignatureFailNoFilterChain) { + EXPECT_CALL(listen_socket_, ioHandle()); + EXPECT_CALL(filter_chain_manager_, findFilterChain(_)) + .WillOnce(Invoke([&](const Network::ConnectionSocket&) { return nullptr; })); + + std::string signature; + proof_source_.ComputeTlsSignature( + server_address_, client_address_, hostname_, SSL_SIGN_RSA_PSS_RSAE_SHA256, "payload", + std::make_unique(false, filter_chain_, signature)); } TEST_F(EnvoyQuicProofSourceTest, UnexpectedPrivateKey) { @@ -315,9 +317,10 @@ HO6j1yxTIGU6w8++AQJACdFPnRidOaj5oJmcZq0s6WGTYfegjTOKgi5KQzO0FTwG qGm130brdD+1U1EJnEFmleLZ/W6mEi3MxcKpWOpTqQ== -----END RSA PRIVATE KEY-----)"); EXPECT_CALL(tls_cert_config, privateKey()).WillOnce(ReturnRef(rsa_pkey_1024_len)); - 
proof_source_.ComputeTlsSignature(server_address_, client_address_, hostname_, - SSL_SIGN_RSA_PSS_RSAE_SHA256, "payload", - std::make_unique(false)); + std::string signature; + proof_source_.ComputeTlsSignature( + server_address_, client_address_, hostname_, SSL_SIGN_RSA_PSS_RSAE_SHA256, "payload", + std::make_unique(false, filter_chain_, signature)); } TEST_F(EnvoyQuicProofSourceTest, InvalidPrivateKey) { @@ -334,9 +337,10 @@ TEST_F(EnvoyQuicProofSourceTest, InvalidPrivateKey) { EXPECT_CALL(*mock_context_config_, isReady()).WillOnce(Return(true)); std::string invalid_pkey("abcdefg"); EXPECT_CALL(tls_cert_config, privateKey()).WillOnce(ReturnRef(invalid_pkey)); - proof_source_.ComputeTlsSignature(server_address_, client_address_, hostname_, - SSL_SIGN_RSA_PSS_RSAE_SHA256, "payload", - std::make_unique(false)); + std::string signature; + proof_source_.ComputeTlsSignature( + server_address_, client_address_, hostname_, SSL_SIGN_RSA_PSS_RSAE_SHA256, "payload", + std::make_unique(false, filter_chain_, signature)); } } // namespace Quic diff --git a/test/common/quic/envoy_quic_proof_verifier_test.cc b/test/common/quic/envoy_quic_proof_verifier_test.cc index 442e9b867f961..21b7be3136a7e 100644 --- a/test/common/quic/envoy_quic_proof_verifier_test.cc +++ b/test/common/quic/envoy_quic_proof_verifier_test.cc @@ -149,8 +149,6 @@ TEST_F(EnvoyQuicProofVerifierTest, VerifyCertChainFailureLeafCertWithGarbage) { TEST_F(EnvoyQuicProofVerifierTest, VerifyCertChainFailureInvalidHost) { configCertVerificationDetails(true); - std::unique_ptr cert_view = - quic::CertificateView::ParseSingleCertificate(leaf_cert_); const std::string ocsp_response; const std::string cert_sct; std::string error_details; @@ -161,47 +159,8 @@ TEST_F(EnvoyQuicProofVerifierTest, VerifyCertChainFailureInvalidHost) { EXPECT_EQ("Leaf certificate doesn't match hostname: unknown.org", error_details); } -TEST_F(EnvoyQuicProofVerifierTest, VerifyProofFailureEmptyCertChain) { - configCertVerificationDetails(true); - 
std::unique_ptr cert_view = - quic::CertificateView::ParseSingleCertificate(leaf_cert_); - quic::QuicTransportVersion version{quic::QUIC_VERSION_UNSUPPORTED}; - absl::string_view chlo_hash{"aaaaa"}; - std::string server_config{"Server Config"}; - const std::string ocsp_response; - const std::string cert_sct; - std::string error_details; - const std::vector certs; - EXPECT_EQ(quic::QUIC_FAILURE, - verifier_->VerifyProof(std::string(cert_view->subject_alt_name_domains()[0]), 54321, - server_config, version, chlo_hash, certs, cert_sct, "signature", - nullptr, &error_details, nullptr, nullptr)); - EXPECT_EQ("Received empty cert chain.", error_details); -} - -TEST_F(EnvoyQuicProofVerifierTest, VerifyProofFailureInvalidLeafCert) { +TEST_F(EnvoyQuicProofVerifierTest, VerifyCertChainFailureUnsupportedECKey) { configCertVerificationDetails(true); - std::unique_ptr cert_view = - quic::CertificateView::ParseSingleCertificate(leaf_cert_); - quic::QuicTransportVersion version{quic::QUIC_VERSION_UNSUPPORTED}; - absl::string_view chlo_hash{"aaaaa"}; - std::string server_config{"Server Config"}; - const std::string ocsp_response; - const std::string cert_sct; - std::string error_details; - const std::vector certs{"invalid leaf cert"}; - EXPECT_EQ(quic::QUIC_FAILURE, - verifier_->VerifyProof(std::string(cert_view->subject_alt_name_domains()[0]), 54321, - server_config, version, chlo_hash, certs, cert_sct, "signature", - nullptr, &error_details, nullptr, nullptr)); - EXPECT_EQ("Invalid leaf cert.", error_details); -} - -TEST_F(EnvoyQuicProofVerifierTest, VerifyProofFailureUnsupportedECKey) { - configCertVerificationDetails(true); - quic::QuicTransportVersion version{quic::QUIC_VERSION_UNSUPPORTED}; - absl::string_view chlo_hash{"aaaaa"}; - std::string server_config{"Server Config"}; const std::string ocsp_response; const std::string cert_sct; std::string error_details; @@ -228,28 +187,10 @@ VdGXMAjeXhnOnPvmDi5hUz/uvI+Pg6cNmUoCRwSCnK/DazhA 
quic::CertificateView::ParseSingleCertificate(chain[0]); ASSERT(cert_view); EXPECT_EQ(quic::QUIC_FAILURE, - verifier_->VerifyProof("www.google.com", 54321, server_config, version, chlo_hash, - chain, cert_sct, "signature", nullptr, &error_details, nullptr, - nullptr)); + verifier_->VerifyCertChain("www.google.com", 54321, chain, ocsp_response, cert_sct, + nullptr, &error_details, nullptr, nullptr, nullptr)); EXPECT_EQ("Invalid leaf cert, only P-256 ECDSA certificates are supported", error_details); } -TEST_F(EnvoyQuicProofVerifierTest, VerifyProofFailureInvalidSignature) { - configCertVerificationDetails(true); - std::unique_ptr cert_view = - quic::CertificateView::ParseSingleCertificate(leaf_cert_); - quic::QuicTransportVersion version{quic::QUIC_VERSION_UNSUPPORTED}; - absl::string_view chlo_hash{"aaaaa"}; - std::string server_config{"Server Config"}; - const std::string ocsp_response; - const std::string cert_sct; - std::string error_details; - EXPECT_EQ(quic::QUIC_FAILURE, - verifier_->VerifyProof(std::string(cert_view->subject_alt_name_domains()[0]), 54321, - server_config, version, chlo_hash, {leaf_cert_}, cert_sct, - "signature", nullptr, &error_details, nullptr, nullptr)); - EXPECT_EQ("Signature is not valid.", error_details); -} - } // namespace Quic } // namespace Envoy diff --git a/test/common/quic/envoy_quic_server_session_test.cc b/test/common/quic/envoy_quic_server_session_test.cc index 349afd7ff9737..f44b359cc2b78 100644 --- a/test/common/quic/envoy_quic_server_session_test.cc +++ b/test/common/quic/envoy_quic_server_session_test.cc @@ -146,16 +146,15 @@ class EnvoyQuicTestCryptoServerStreamFactory : public EnvoyQuicCryptoServerStrea } }; -class EnvoyQuicServerSessionTest : public testing::TestWithParam { +class EnvoyQuicServerSessionTest : public testing::Test { public: EnvoyQuicServerSessionTest() : api_(Api::createApiForTest(time_system_)), dispatcher_(api_->allocateDispatcher("test_thread")), connection_helper_(*dispatcher_), - 
alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() { - SetQuicReloadableFlag(quic_disable_version_draft_29, !GetParam()); - SetQuicReloadableFlag(quic_disable_version_rfcv1, !GetParam()); - return quic::ParsedVersionOfIndex(quic::CurrentSupportedVersions(), 0); - }()), + alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_({[]() { + SetQuicReloadableFlag(quic_decline_server_push_stream, true); + return quic::CurrentSupportedHttp3Versions()[0]; + }()}), quic_stat_names_(listener_config_.listenerScope().symbolTable()), quic_connection_(new MockEnvoyQuicServerConnection( connection_helper_, alarm_factory_, writer_, quic_version_, *listener_config_.socket_)), @@ -234,8 +233,7 @@ class EnvoyQuicServerSessionTest : public testing::TestWithParam { encoder.getStream().addCallbacks(stream_callbacks); return request_decoder; })); - quic::QuicStreamId stream_id = - quic::VersionUsesHttp3(quic_version_[0].transport_version) ? 4u : 5u; + quic::QuicStreamId stream_id = 4u; return envoy_quic_session_.GetOrCreateStream(stream_id); } @@ -280,12 +278,8 @@ class EnvoyQuicServerSessionTest : public testing::TestWithParam { envoy::config::core::v3::Http3ProtocolOptions http3_options_; }; -INSTANTIATE_TEST_SUITE_P(EnvoyQuicServerSessionTests, EnvoyQuicServerSessionTest, - testing::ValuesIn({true, false})); - -TEST_P(EnvoyQuicServerSessionTest, NewStreamBeforeInitializingFilter) { - quic::QuicStreamId stream_id = - quic::VersionUsesHttp3(quic_version_[0].transport_version) ? 
4u : 5u; +TEST_F(EnvoyQuicServerSessionTest, NewStreamBeforeInitializingFilter) { + quic::QuicStreamId stream_id = 4u; EXPECT_ENVOY_BUG(envoy_quic_session_.GetOrCreateStream(stream_id), fmt::format("attempts to create stream", envoy_quic_session_.id(), stream_id)); EXPECT_CALL(*quic_connection_, @@ -296,14 +290,13 @@ TEST_P(EnvoyQuicServerSessionTest, NewStreamBeforeInitializingFilter) { envoy_quic_session_.close(Network::ConnectionCloseType::NoFlush); } -TEST_P(EnvoyQuicServerSessionTest, NewStream) { +TEST_F(EnvoyQuicServerSessionTest, NewStream) { installReadFilter(); Http::MockRequestDecoder request_decoder; EXPECT_CALL(http_connection_callbacks_, newStream(_, false)) .WillOnce(testing::ReturnRef(request_decoder)); - quic::QuicStreamId stream_id = - quic::VersionUsesHttp3(quic_version_[0].transport_version) ? 4u : 5u; + quic::QuicStreamId stream_id = 4u; auto stream = reinterpret_cast(envoy_quic_session_.GetOrCreateStream(stream_id)); @@ -328,93 +321,40 @@ TEST_P(EnvoyQuicServerSessionTest, NewStream) { stream->OnStreamHeaderList(/*fin=*/true, headers.uncompressed_header_bytes(), headers); } -TEST_P(EnvoyQuicServerSessionTest, InvalidIncomingStreamId) { +TEST_F(EnvoyQuicServerSessionTest, InvalidIncomingStreamId) { installReadFilter(); Http::MockRequestDecoder request_decoder; Http::MockStreamCallbacks stream_callbacks; - // IETF stream 5 and G-Quic stream 2 are server initiated. - quic::QuicStreamId stream_id = - quic::VersionUsesHttp3(quic_version_[0].transport_version) ? 5u : 2u; + // IETF stream 5 is server initiated. + quic::QuicStreamId stream_id = 5u; std::string data("aaaa"); quic::QuicStreamFrame stream_frame(stream_id, false, 0, data); EXPECT_CALL(http_connection_callbacks_, newStream(_, false)).Times(0); - EXPECT_CALL(*quic_connection_, - SendConnectionClosePacket((quic::VersionUsesHttp3(quic_version_[0].transport_version) - ? 
quic::QUIC_HTTP_STREAM_WRONG_DIRECTION - : quic::QUIC_INVALID_STREAM_ID), - _, "Data for nonexistent stream")); + EXPECT_CALL(*quic_connection_, SendConnectionClosePacket(quic::QUIC_HTTP_STREAM_WRONG_DIRECTION, + _, "Data for nonexistent stream")); EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose)); envoy_quic_session_.OnStreamFrame(stream_frame); } -TEST_P(EnvoyQuicServerSessionTest, NoNewStreamForInvalidIncomingStream) { +TEST_F(EnvoyQuicServerSessionTest, NoNewStreamForInvalidIncomingStream) { installReadFilter(); Http::MockRequestDecoder request_decoder; Http::MockStreamCallbacks stream_callbacks; - // IETF stream 5 and G-Quic stream 2 are server initiated. - quic::QuicStreamId stream_id = - quic::VersionUsesHttp3(quic_version_[0].transport_version) ? 5u : 2u; + // IETF stream 5 is server initiated. + quic::QuicStreamId stream_id = 5u; EXPECT_CALL(http_connection_callbacks_, newStream(_, false)).Times(0); - EXPECT_CALL(*quic_connection_, - SendConnectionClosePacket(quic::VersionUsesHttp3(quic_version_[0].transport_version) - ? quic::QUIC_HTTP_STREAM_WRONG_DIRECTION - : quic::QUIC_INVALID_STREAM_ID, - _, "Data for nonexistent stream")); + EXPECT_CALL(*quic_connection_, SendConnectionClosePacket(quic::QUIC_HTTP_STREAM_WRONG_DIRECTION, + _, "Data for nonexistent stream")); EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose)); // Stream creation on closed connection should fail. 
EXPECT_EQ(nullptr, envoy_quic_session_.GetOrCreateStream(stream_id)); } -TEST_P(EnvoyQuicServerSessionTest, OnResetFrameGoogleQuic) { +TEST_F(EnvoyQuicServerSessionTest, OnResetFrameIetfQuic) { installReadFilter(); - if (quic::VersionUsesHttp3(quic_version_[0].transport_version)) { - return; - } - Http::MockRequestDecoder request_decoder; - Http::MockStreamCallbacks stream_callbacks; - quic::QuicStream* stream1 = createNewStream(request_decoder, stream_callbacks); - quic::QuicRstStreamFrame rst1(/*control_frame_id=*/1u, stream1->id(), - quic::QUIC_ERROR_PROCESSING_STREAM, /*bytes_written=*/0u); - EXPECT_CALL(stream_callbacks, onResetStream(Http::StreamResetReason::RemoteReset, _)); - EXPECT_CALL(*quic_connection_, SendControlFrame(_)) - .WillOnce(Invoke([stream_id = stream1->id()](const quic::QuicFrame& frame) { - EXPECT_EQ(stream_id, frame.rst_stream_frame->stream_id); - EXPECT_EQ(quic::QUIC_RST_ACKNOWLEDGEMENT, frame.rst_stream_frame->error_code); - return false; - })); - envoy_quic_session_.OnRstStream(rst1); - - EXPECT_CALL(http_connection_callbacks_, newStream(_, false)) - .WillOnce(Invoke([&request_decoder, &stream_callbacks](Http::ResponseEncoder& encoder, - bool) -> Http::RequestDecoder& { - encoder.getStream().addCallbacks(stream_callbacks); - return request_decoder; - })); - quic::QuicStream* stream2 = envoy_quic_session_.GetOrCreateStream(stream1->id() + 4u); - quic::QuicRstStreamFrame rst2(/*control_frame_id=*/1u, stream2->id(), quic::QUIC_REFUSED_STREAM, - /*bytes_written=*/0u); - EXPECT_CALL(stream_callbacks, - onResetStream(Http::StreamResetReason::RemoteRefusedStreamReset, _)); - - envoy_quic_session_.OnRstStream(rst2); - EXPECT_EQ(1U, TestUtility::findCounter( - static_cast(listener_config_.listenerScope()), - "http3.downstream.rx.quic_reset_stream_error_code_QUIC_REFUSED_STREAM") - ->value()); - EXPECT_EQ(1U, TestUtility::findCounter( - static_cast(listener_config_.listenerScope()), - 
"http3.downstream.rx.quic_reset_stream_error_code_QUIC_ERROR_PROCESSING_STREAM") - ->value()); -} - -TEST_P(EnvoyQuicServerSessionTest, OnResetFrameIetfQuic) { - installReadFilter(); - if (!quic::VersionUsesHttp3(quic_version_[0].transport_version)) { - return; - } Http::MockRequestDecoder request_decoder; Http::MockStreamCallbacks stream_callbacks; auto stream1 = @@ -480,7 +420,7 @@ TEST_P(EnvoyQuicServerSessionTest, OnResetFrameIetfQuic) { ->value()); } -TEST_P(EnvoyQuicServerSessionTest, ConnectionClose) { +TEST_F(EnvoyQuicServerSessionTest, ConnectionClose) { installReadFilter(); std::string error_details("dummy details"); @@ -495,7 +435,7 @@ TEST_P(EnvoyQuicServerSessionTest, ConnectionClose) { EXPECT_EQ(Network::Connection::State::Closed, envoy_quic_session_.state()); } -TEST_P(EnvoyQuicServerSessionTest, ConnectionCloseWithActiveStream) { +TEST_F(EnvoyQuicServerSessionTest, ConnectionCloseWithActiveStream) { installReadFilter(); Http::MockRequestDecoder request_decoder; @@ -510,7 +450,24 @@ TEST_P(EnvoyQuicServerSessionTest, ConnectionCloseWithActiveStream) { EXPECT_TRUE(stream->write_side_closed() && stream->reading_stopped()); } -TEST_P(EnvoyQuicServerSessionTest, NoFlushWithDataToWrite) { +TEST_F(EnvoyQuicServerSessionTest, RemoteConnectionCloseWithActiveStream) { + installReadFilter(); + + Http::MockRequestDecoder request_decoder; + Http::MockStreamCallbacks stream_callbacks; + quic::QuicStream* stream = createNewStream(request_decoder, stream_callbacks); + EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::RemoteClose)); + EXPECT_CALL(stream_callbacks, onResetStream(Http::StreamResetReason::ConnectionFailure, _)); + quic::QuicConnectionCloseFrame frame(quic_version_[0].transport_version, + quic::QUIC_HANDSHAKE_TIMEOUT, quic::NO_IETF_QUIC_ERROR, + "dummy details", + /* transport_close_frame_type = */ 0); + quic_connection_->OnConnectionCloseFrame(frame); + EXPECT_EQ(Network::Connection::State::Closed, 
envoy_quic_session_.state()); + EXPECT_TRUE(stream->write_side_closed() && stream->reading_stopped()); +} + +TEST_F(EnvoyQuicServerSessionTest, NoFlushWithDataToWrite) { installReadFilter(); Http::MockRequestDecoder request_decoder; @@ -528,7 +485,7 @@ TEST_P(EnvoyQuicServerSessionTest, NoFlushWithDataToWrite) { EXPECT_TRUE(stream->write_side_closed() && stream->reading_stopped()); } -TEST_P(EnvoyQuicServerSessionTest, FlushCloseWithDataToWrite) { +TEST_F(EnvoyQuicServerSessionTest, FlushCloseWithDataToWrite) { installReadFilter(); Http::MockRequestDecoder request_decoder; Http::MockStreamCallbacks stream_callbacks; @@ -551,7 +508,7 @@ TEST_P(EnvoyQuicServerSessionTest, FlushCloseWithDataToWrite) { // Tests that a write event after flush close should update the delay close // timer. -TEST_P(EnvoyQuicServerSessionTest, WriteUpdatesDelayCloseTimer) { +TEST_F(EnvoyQuicServerSessionTest, WriteUpdatesDelayCloseTimer) { installReadFilter(); // Drive congestion control manually. auto send_algorithm = new testing::NiceMock; @@ -636,7 +593,7 @@ TEST_P(EnvoyQuicServerSessionTest, WriteUpdatesDelayCloseTimer) { // Tests that if delay close timeout is not configured, flush close will not act // based on timeout. -TEST_P(EnvoyQuicServerSessionTest, FlushCloseNoTimeout) { +TEST_F(EnvoyQuicServerSessionTest, FlushCloseNoTimeout) { installReadFilter(); // Switch to a encryption forward secure crypto stream. 
quic::test::QuicServerSessionBasePeer::SetCryptoStream(&envoy_quic_session_, nullptr); @@ -723,7 +680,7 @@ TEST_P(EnvoyQuicServerSessionTest, FlushCloseNoTimeout) { envoy_quic_session_.close(Network::ConnectionCloseType::NoFlush); } -TEST_P(EnvoyQuicServerSessionTest, FlushCloseWithTimeout) { +TEST_F(EnvoyQuicServerSessionTest, FlushCloseWithTimeout) { installReadFilter(); envoy_quic_session_.setDelayedCloseTimeout(std::chrono::milliseconds(100)); Http::MockRequestDecoder request_decoder; @@ -754,7 +711,7 @@ TEST_P(EnvoyQuicServerSessionTest, FlushCloseWithTimeout) { EXPECT_FALSE(quic_connection_->connected()); } -TEST_P(EnvoyQuicServerSessionTest, FlushAndWaitForCloseWithTimeout) { +TEST_F(EnvoyQuicServerSessionTest, FlushAndWaitForCloseWithTimeout) { installReadFilter(); envoy_quic_session_.setDelayedCloseTimeout(std::chrono::milliseconds(100)); Http::MockRequestDecoder request_decoder; @@ -787,7 +744,7 @@ TEST_P(EnvoyQuicServerSessionTest, FlushAndWaitForCloseWithTimeout) { EXPECT_FALSE(quic_connection_->connected()); } -TEST_P(EnvoyQuicServerSessionTest, FlusWriteTransitToFlushWriteWithDelay) { +TEST_F(EnvoyQuicServerSessionTest, FlusWriteTransitToFlushWriteWithDelay) { installReadFilter(); envoy_quic_session_.setDelayedCloseTimeout(std::chrono::milliseconds(100)); Http::MockRequestDecoder request_decoder; @@ -824,7 +781,7 @@ TEST_P(EnvoyQuicServerSessionTest, FlusWriteTransitToFlushWriteWithDelay) { EXPECT_FALSE(quic_connection_->connected()); } -TEST_P(EnvoyQuicServerSessionTest, FlushAndWaitForCloseWithNoPendingData) { +TEST_F(EnvoyQuicServerSessionTest, FlushAndWaitForCloseWithNoPendingData) { installReadFilter(); envoy_quic_session_.setDelayedCloseTimeout(std::chrono::milliseconds(100)); // This close should be delayed as configured. 
@@ -847,31 +804,23 @@ TEST_P(EnvoyQuicServerSessionTest, FlushAndWaitForCloseWithNoPendingData) { EXPECT_EQ(Network::Connection::State::Closed, envoy_quic_session_.state()); } -TEST_P(EnvoyQuicServerSessionTest, ShutdownNotice) { +TEST_F(EnvoyQuicServerSessionTest, ShutdownNotice) { installReadFilter(); testing::NiceMock debug_visitor; envoy_quic_session_.set_debug_visitor(&debug_visitor); - if (quic::VersionUsesHttp3(quic_version_[0].transport_version)) { - EXPECT_CALL(debug_visitor, OnGoAwayFrameSent(_)); - } else { - // This is a no-op for pre-HTTP3 versions of QUIC. - } + EXPECT_CALL(debug_visitor, OnGoAwayFrameSent(_)); http_connection_->shutdownNotice(); } -TEST_P(EnvoyQuicServerSessionTest, GoAway) { +TEST_F(EnvoyQuicServerSessionTest, GoAway) { installReadFilter(); testing::NiceMock debug_visitor; envoy_quic_session_.set_debug_visitor(&debug_visitor); - if (quic::VersionUsesHttp3(quic_version_[0].transport_version)) { - EXPECT_CALL(debug_visitor, OnGoAwayFrameSent(_)); - } else { - EXPECT_CALL(*quic_connection_, SendControlFrame(_)); - } + EXPECT_CALL(debug_visitor, OnGoAwayFrameSent(_)); http_connection_->goAway(); } -TEST_P(EnvoyQuicServerSessionTest, ConnectedAfterHandshake) { +TEST_F(EnvoyQuicServerSessionTest, ConnectedAfterHandshake) { installReadFilter(); EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::Connected)); if (!quic_version_[0].UsesTls()) { @@ -886,13 +835,13 @@ TEST_P(EnvoyQuicServerSessionTest, ConnectedAfterHandshake) { EXPECT_FALSE(quic_connection_->connectionSocket()->ioHandle().isOpen()); } -TEST_P(EnvoyQuicServerSessionTest, NetworkConnectionInterface) { +TEST_F(EnvoyQuicServerSessionTest, NetworkConnectionInterface) { installReadFilter(); EXPECT_EQ(dispatcher_.get(), &envoy_quic_session_.dispatcher()); EXPECT_TRUE(envoy_quic_session_.readEnabled()); } -TEST_P(EnvoyQuicServerSessionTest, SendBufferWatermark) { +TEST_F(EnvoyQuicServerSessionTest, SendBufferWatermark) { // Switch to a encryption forward 
secure crypto stream. quic::test::QuicServerSessionBasePeer::SetCryptoStream(&envoy_quic_session_, nullptr); quic::test::QuicServerSessionBasePeer::SetCryptoStream( @@ -926,8 +875,7 @@ TEST_P(EnvoyQuicServerSessionTest, SendBufferWatermark) { encoder.getStream().addCallbacks(stream_callbacks); return request_decoder; })); - quic::QuicStreamId stream_id = - quic::VersionUsesHttp3(quic_version_[0].transport_version) ? 4u : 5u; + quic::QuicStreamId stream_id = 4u; auto stream1 = dynamic_cast(envoy_quic_session_.GetOrCreateStream(stream_id)); @@ -1057,8 +1005,8 @@ TEST_P(EnvoyQuicServerSessionTest, SendBufferWatermark) { EXPECT_TRUE(stream2->IsFlowControlBlocked()); // Resetting stream3 should lower the buffered bytes, but callbacks will not - // be triggered because reset callback has been already triggered. - EXPECT_CALL(stream_callbacks3, onResetStream(Http::StreamResetReason::LocalReset, "")); + // be triggered because end stream is already encoded. + EXPECT_CALL(stream_callbacks3, onResetStream(Http::StreamResetReason::LocalReset, "")).Times(0); // Connection buffered data book keeping should also be updated. EXPECT_CALL(network_connection_callbacks_, onBelowWriteBufferLowWatermark()); stream3->resetStream(Http::StreamResetReason::LocalReset); @@ -1077,223 +1025,21 @@ TEST_P(EnvoyQuicServerSessionTest, SendBufferWatermark) { EXPECT_TRUE(stream2->write_side_closed()); } -TEST_P(EnvoyQuicServerSessionTest, HeadersContributeToWatermarkGquic) { - if (quic::VersionUsesHttp3(quic_version_[0].transport_version)) { - installReadFilter(); - return; - } - // Switch to a encryption forward secure crypto stream. 
- quic::test::QuicServerSessionBasePeer::SetCryptoStream(&envoy_quic_session_, nullptr); - quic::test::QuicServerSessionBasePeer::SetCryptoStream( - &envoy_quic_session_, - new TestQuicCryptoServerStream(&crypto_config_, &compressed_certs_cache_, - &envoy_quic_session_, &crypto_stream_helper_)); - quic_connection_->SetDefaultEncryptionLevel(quic::ENCRYPTION_FORWARD_SECURE); - quic_connection_->SetEncrypter( - quic::ENCRYPTION_FORWARD_SECURE, - std::make_unique(quic::Perspective::IS_SERVER)); - // Drive congestion control manually. - auto send_algorithm = new testing::NiceMock; - quic::test::QuicConnectionPeer::SetSendAlgorithm(quic_connection_, send_algorithm); - EXPECT_CALL(*send_algorithm, PacingRate(_)).WillRepeatedly(Return(quic::QuicBandwidth::Zero())); - EXPECT_CALL(*send_algorithm, BandwidthEstimate()) - .WillRepeatedly(Return(quic::QuicBandwidth::Zero())); - EXPECT_CALL(*quic_connection_, SendControlFrame(_)).Times(AnyNumber()); - - // Bump connection flow control window large enough not to interfere - // stream writing. - envoy_quic_session_.flow_controller()->UpdateSendWindowOffset( - 10 * quic::kDefaultFlowControlSendWindow); - installReadFilter(); - Http::MockRequestDecoder request_decoder; - Http::MockStreamCallbacks stream_callbacks; - EXPECT_CALL(http_connection_callbacks_, newStream(_, false)) - .WillOnce(Invoke([&request_decoder, &stream_callbacks](Http::ResponseEncoder& encoder, - bool) -> Http::RequestDecoder& { - encoder.getStream().addCallbacks(stream_callbacks); - return request_decoder; - })); - quic::QuicStreamId stream_id = - quic::VersionUsesHttp3(quic_version_[0].transport_version) ? 4u : 5u; - auto stream1 = - dynamic_cast(envoy_quic_session_.GetOrCreateStream(stream_id)); - - // Receive a GET request on created stream. 
- quic::QuicHeaderList request_headers; - request_headers.OnHeaderBlockStart(); - std::string host("www.abc.com"); - request_headers.OnHeader(":authority", host); - request_headers.OnHeader(":method", "GET"); - request_headers.OnHeader(":path", "/"); - request_headers.OnHeaderBlockEnd(/*uncompressed_header_bytes=*/0, /*compressed_header_bytes=*/0); - // Request headers should be propagated to decoder. - EXPECT_CALL(request_decoder, decodeHeaders_(_, /*end_stream=*/true)) - .WillOnce(Invoke([&host](const Http::RequestHeaderMapPtr& decoded_headers, bool) { - EXPECT_EQ(host, decoded_headers->getHostValue()); - EXPECT_EQ("/", decoded_headers->getPathValue()); - EXPECT_EQ(Http::Headers::get().MethodValues.Get, decoded_headers->getMethodValue()); - })); - stream1->OnStreamHeaderList(/*fin=*/true, request_headers.uncompressed_header_bytes(), - request_headers); - - Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; - // Make connection congestion control blocked so headers are buffered. - EXPECT_CALL(*send_algorithm, CanSend(_)).WillRepeatedly(Return(false)); - stream1->encodeHeaders(response_headers, false); - // Buffer a response slightly smaller than connection level watermark, but - // with the previously buffered headers, this write should reach high - // watermark. - std::string response(24 * 1024 - 1, 'a'); - Buffer::OwnedImpl buffer(response); - // Triggered twice, once by stream, the other time by connection. - EXPECT_CALL(stream_callbacks, onAboveWriteBufferHighWatermark()).Times(2); - EXPECT_CALL(network_connection_callbacks_, onAboveWriteBufferHighWatermark) - .WillOnce(Invoke( - [this]() { http_connection_->onUnderlyingConnectionAboveWriteBufferHighWatermark(); })); - stream1->encodeData(buffer, false); - EXPECT_FALSE(envoy_quic_session_.IsConnectionFlowControlBlocked()); - - // Write the buffered data out till stream is flow control blocked. Both - // stream and connection level buffers should drop below watermark. 
- EXPECT_CALL(*send_algorithm, CanSend(_)).WillRepeatedly(Return(true)); - EXPECT_CALL(*send_algorithm, GetCongestionWindow()).WillRepeatedly(Return(quic::kDefaultTCPMSS)); - EXPECT_CALL(network_connection_callbacks_, onBelowWriteBufferLowWatermark) - .WillOnce(Invoke( - [this]() { http_connection_->onUnderlyingConnectionBelowWriteBufferLowWatermark(); })); - EXPECT_CALL(stream_callbacks, onBelowWriteBufferLowWatermark()).Times(2); - envoy_quic_session_.OnCanWrite(); - EXPECT_TRUE(stream1->IsFlowControlBlocked()); - - // Buffer more response because of flow control. The buffered bytes become just below connection - // level high watermark. - std::string response1(16 * 1024 - 20, 'a'); - Buffer::OwnedImpl buffer1(response1); - EXPECT_CALL(stream_callbacks, onAboveWriteBufferHighWatermark()); - stream1->encodeData(buffer1, false); - - // Make connection congestion control blocked again. - EXPECT_CALL(*send_algorithm, CanSend(_)).WillRepeatedly(Return(false)); - // Buffering the trailers will cause connection to reach high watermark. 
- EXPECT_CALL(network_connection_callbacks_, onAboveWriteBufferHighWatermark) - .WillOnce(Invoke( - [this]() { http_connection_->onUnderlyingConnectionAboveWriteBufferHighWatermark(); })); - Http::TestResponseTrailerMapImpl response_trailers{{"trailer-key", "trailer-value"}}; - stream1->encodeTrailers(response_trailers); - - EXPECT_CALL(network_connection_callbacks_, onBelowWriteBufferLowWatermark) - .WillOnce(Invoke( - [this]() { http_connection_->onUnderlyingConnectionBelowWriteBufferLowWatermark(); })); - EXPECT_CALL(stream_callbacks, onResetStream(Http::StreamResetReason::LocalReset, _)); - stream1->resetStream(Http::StreamResetReason::LocalReset); - - EXPECT_EQ(1U, TestUtility::findCounter( - static_cast(listener_config_.listenerScope()), - "http3.downstream.tx.quic_reset_stream_error_code_QUIC_STREAM_CANCELLED") - ->value()); -} - -TEST_P(EnvoyQuicServerSessionTest, OnCanWriteUpdateWatermarkGquic) { - if (quic::VersionUsesHttp3(quic_version_[0].transport_version)) { - installReadFilter(); - return; - } - // Switch to a encryption forward secure crypto stream. - quic::test::QuicServerSessionBasePeer::SetCryptoStream(&envoy_quic_session_, nullptr); - quic::test::QuicServerSessionBasePeer::SetCryptoStream( - &envoy_quic_session_, - new TestQuicCryptoServerStream(&crypto_config_, &compressed_certs_cache_, - &envoy_quic_session_, &crypto_stream_helper_)); - quic_connection_->SetDefaultEncryptionLevel(quic::ENCRYPTION_FORWARD_SECURE); - quic_connection_->SetEncrypter( - quic::ENCRYPTION_FORWARD_SECURE, - std::make_unique(quic::Perspective::IS_SERVER)); - // Drive congestion control manually. 
- auto send_algorithm = new testing::NiceMock; - quic::test::QuicConnectionPeer::SetSendAlgorithm(quic_connection_, send_algorithm); - EXPECT_CALL(*send_algorithm, PacingRate(_)).WillRepeatedly(Return(quic::QuicBandwidth::Zero())); - EXPECT_CALL(*send_algorithm, BandwidthEstimate()) - .WillRepeatedly(Return(quic::QuicBandwidth::Zero())); - EXPECT_CALL(*quic_connection_, SendControlFrame(_)).Times(AnyNumber()); - - // Bump connection flow control window large enough not to interfere - // stream writing. - envoy_quic_session_.flow_controller()->UpdateSendWindowOffset( - 10 * quic::kDefaultFlowControlSendWindow); +TEST_F(EnvoyQuicServerSessionTest, IncomingUnidirectionalReadStream) { installReadFilter(); Http::MockRequestDecoder request_decoder; Http::MockStreamCallbacks stream_callbacks; - EXPECT_CALL(http_connection_callbacks_, newStream(_, false)) - .WillOnce(Invoke([&request_decoder, &stream_callbacks](Http::ResponseEncoder& encoder, - bool) -> Http::RequestDecoder& { - encoder.getStream().addCallbacks(stream_callbacks); - return request_decoder; - })); - quic::QuicStreamId stream_id = - quic::VersionUsesHttp3(quic_version_[0].transport_version) ? 4u : 5u; - auto stream1 = - dynamic_cast(envoy_quic_session_.GetOrCreateStream(stream_id)); - - // Receive a GET request on created stream. - quic::QuicHeaderList request_headers; - request_headers.OnHeaderBlockStart(); - std::string host("www.abc.com"); - request_headers.OnHeader(":authority", host); - request_headers.OnHeader(":method", "GET"); - request_headers.OnHeader(":path", "/"); - request_headers.OnHeaderBlockEnd(/*uncompressed_header_bytes=*/0, /*compressed_header_bytes=*/0); - // Request headers should be propagated to decoder. 
- EXPECT_CALL(request_decoder, decodeHeaders_(_, /*end_stream=*/true)) - .WillOnce(Invoke([&host](const Http::RequestHeaderMapPtr& decoded_headers, bool) { - EXPECT_EQ(host, decoded_headers->getHostValue()); - EXPECT_EQ("/", decoded_headers->getPathValue()); - EXPECT_EQ(Http::Headers::get().MethodValues.Get, decoded_headers->getMethodValue()); - })); - stream1->OnStreamHeaderList(/*fin=*/true, request_headers.uncompressed_header_bytes(), - request_headers); - - Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; - stream1->encodeHeaders(response_headers, false); - // Make connection congestion control blocked. - EXPECT_CALL(*send_algorithm, CanSend(_)).WillRepeatedly(Return(false)); - // Buffer a response slightly smaller than connection level watermark, but - // with the previously buffered headers, this write should reach high - // watermark. - std::string response(24 * 1024 - 1, 'a'); - Buffer::OwnedImpl buffer(response); - // Triggered twice, once by stream, the other time by connection. - EXPECT_CALL(stream_callbacks, onAboveWriteBufferHighWatermark()).Times(2); - EXPECT_CALL(network_connection_callbacks_, onAboveWriteBufferHighWatermark) - .WillOnce(Invoke( - [this]() { http_connection_->onUnderlyingConnectionAboveWriteBufferHighWatermark(); })); - stream1->encodeData(buffer, false); - EXPECT_FALSE(envoy_quic_session_.IsConnectionFlowControlBlocked()); - - // Write the buffered data out till stream is flow control blocked. Both - // stream and connection level buffers should drop below watermark. 
- bool congestion_control_blocked{false}; - EXPECT_CALL(*send_algorithm, CanSend(_)).WillRepeatedly(Return(!congestion_control_blocked)); - EXPECT_CALL(*send_algorithm, GetCongestionWindow()).WillRepeatedly(Return(quic::kDefaultTCPMSS)); - EXPECT_CALL(network_connection_callbacks_, onBelowWriteBufferLowWatermark) - .WillOnce(Invoke( - [this]() { http_connection_->onUnderlyingConnectionBelowWriteBufferLowWatermark(); })); - // Write trailers when buffered bytes drops below low watermark. - EXPECT_CALL(stream_callbacks, onBelowWriteBufferLowWatermark()) - .Times(2) - .WillOnce(Return()) - .WillOnce(Invoke([stream1, send_algorithm]() { - // Block this trailer so that it gets buffered in headers stream. - // Verify that the buffered bytes are counted into watermark only once. - EXPECT_CALL(*send_algorithm, CanSend(_)).WillRepeatedly(Return(false)); - - // The trailer is large enough so that if its data is counted into watermark once the - // watermark won't go across the high watermark, if it is counted twice, it would go beyond - // high watermark. Note that the trailers are compressed by Hpack by ~38%. - Http::TestResponseTrailerMapImpl response_trailers{ - {"long-trailer1", std::string(16 * 1024, 'a')}}; - stream1->encodeTrailers(response_trailers); - })); - envoy_quic_session_.OnCanWrite(); - EXPECT_TRUE(stream1->IsFlowControlBlocked()); + // IETF stream 2 is client initiated uni-directional stream. 
+ quic::QuicStreamId stream_id = 2u; + auto payload = std::make_unique(8); + quic::QuicDataWriter payload_writer(8, payload.get()); + EXPECT_TRUE(payload_writer.WriteVarInt62(1ul)); + EXPECT_CALL(http_connection_callbacks_, newStream(_, false)).Times(0u); + EXPECT_CALL(network_connection_callbacks_, onEvent(Network::ConnectionEvent::LocalClose)); + EXPECT_CALL(*quic_connection_, SendConnectionClosePacket(quic::QUIC_HTTP_RECEIVE_SERVER_PUSH, _, + "Received server push stream")); + quic::QuicStreamFrame stream_frame(stream_id, false, 0, absl::string_view(payload.get(), 1)); + envoy_quic_session_.OnStreamFrame(stream_frame); } } // namespace Quic diff --git a/test/common/quic/envoy_quic_server_stream_test.cc b/test/common/quic/envoy_quic_server_stream_test.cc index 53ffe144a0e3a..74d5459db9da6 100644 --- a/test/common/quic/envoy_quic_server_stream_test.cc +++ b/test/common/quic/envoy_quic_server_stream_test.cc @@ -7,6 +7,7 @@ #pragma GCC diagnostic ignored "-Winvalid-offsetof" #endif +#include "quiche/quic/core/crypto/null_encrypter.h" #include "quiche/quic/test_tools/quic_connection_peer.h" #include "quiche/quic/test_tools/quic_session_peer.h" @@ -39,16 +40,13 @@ using testing::Invoke; namespace Envoy { namespace Quic { -class EnvoyQuicServerStreamTest : public testing::TestWithParam { +class EnvoyQuicServerStreamTest : public testing::Test { public: EnvoyQuicServerStreamTest() : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher("test_thread")), connection_helper_(*dispatcher_), - alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() { - SetQuicReloadableFlag(quic_disable_version_draft_29, !GetParam()); - SetQuicReloadableFlag(quic_disable_version_rfcv1, !GetParam()); - return quic::CurrentSupportedVersions()[0]; - }()), + alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), + quic_version_(quic::CurrentSupportedHttp3Versions()[0]), 
listener_stats_({ALL_LISTENER_STATS(POOL_COUNTER(listener_config_.listenerScope()), POOL_GAUGE(listener_config_.listenerScope()), POOL_HISTOGRAM(listener_config_.listenerScope()))}), @@ -56,7 +54,7 @@ class EnvoyQuicServerStreamTest : public testing::TestWithParam { quic::ParsedQuicVersionVector{quic_version_}, *listener_config_.socket_), quic_session_(quic_config_, {quic_version_}, &quic_connection_, *dispatcher_, quic_config_.GetInitialStreamFlowControlWindowToSend() * 2), - stream_id_(VersionUsesHttp3(quic_version_.transport_version) ? 4u : 5u), + stream_id_(4u), stats_( {ALL_HTTP3_CODEC_STATS(POOL_COUNTER_PREFIX(listener_config_.listenerScope(), "http3."), POOL_GAUGE_PREFIX(listener_config_.listenerScope(), "http3."))}), @@ -86,13 +84,12 @@ class EnvoyQuicServerStreamTest : public testing::TestWithParam { void SetUp() override { quic_session_.Initialize(); setQuicConfigWithDefaultValues(quic_session_.config()); + quic_connection_.SetEncrypter( + quic::ENCRYPTION_FORWARD_SECURE, + std::make_unique(quic::Perspective::IS_SERVER)); + quic_connection_.SetDefaultEncryptionLevel(quic::ENCRYPTION_FORWARD_SECURE); + quic_session_.OnConfigNegotiated(); - request_headers_.OnHeaderBlockStart(); - request_headers_.OnHeader(":authority", host_); - request_headers_.OnHeader(":method", "POST"); - request_headers_.OnHeader(":path", "/"); - request_headers_.OnHeaderBlockEnd(/*uncompressed_header_bytes=*/0, - /*compressed_header_bytes=*/0); spdy_request_headers_[":authority"] = host_; spdy_request_headers_[":method"] = "POST"; spdy_request_headers_[":path"] = "/"; @@ -109,13 +106,6 @@ class EnvoyQuicServerStreamTest : public testing::TestWithParam { } } - std::string bodyToStreamPayload(const std::string& body) { - if (!quic::VersionUsesHttp3(quic_version_.transport_version)) { - return body; - } - return bodyToHttp3StreamPayload(body); - } - size_t receiveRequest(const std::string& payload, bool fin, size_t decoder_buffer_high_watermark) { EXPECT_CALL(stream_decoder_, 
decodeHeaders_(_, /*end_stream=*/false)) @@ -133,38 +123,19 @@ class EnvoyQuicServerStreamTest : public testing::TestWithParam { quic_stream_->readDisable(true); } })); - if (quic::VersionUsesHttp3(quic_version_.transport_version)) { - std::string data = absl::StrCat(spdyHeaderToHttp3StreamPayload(spdy_request_headers_), - bodyToStreamPayload(payload)); - quic::QuicStreamFrame frame(stream_id_, fin, 0, data); - quic_stream_->OnStreamFrame(frame); - EXPECT_TRUE(quic_stream_->FinishedReadingHeaders()); - return data.length(); - } - quic_stream_->OnStreamHeaderList(/*fin=*/false, request_headers_.uncompressed_header_bytes(), - request_headers_); - - quic::QuicStreamFrame frame(stream_id_, fin, 0, payload); + std::string data = absl::StrCat(spdyHeaderToHttp3StreamPayload(spdy_request_headers_), + bodyToHttp3StreamPayload(payload)); + quic::QuicStreamFrame frame(stream_id_, fin, 0, data); quic_stream_->OnStreamFrame(frame); EXPECT_TRUE(quic_stream_->FinishedReadingHeaders()); - return payload.length(); + return data.length(); } void receiveTrailers(size_t offset) { - if (quic::VersionUsesHttp3(quic_version_.transport_version)) { - spdy_trailers_["key1"] = "value1"; - std::string payload = spdyHeaderToHttp3StreamPayload(spdy_trailers_); - quic::QuicStreamFrame frame(stream_id_, true, offset, payload); - quic_stream_->OnStreamFrame(frame); - } else { - trailers_.OnHeaderBlockStart(); - trailers_.OnHeader("key1", "value1"); - // ":final-offset" is required and stripped off by quic. 
- trailers_.OnHeader(":final-offset", absl::StrCat("", offset)); - trailers_.OnHeaderBlockEnd(/*uncompressed_header_bytes=*/0, /*compressed_header_bytes=*/0); - quic_stream_->OnStreamHeaderList(/*fin=*/true, trailers_.uncompressed_header_bytes(), - trailers_); - } + spdy_trailers_["key1"] = "value1"; + std::string payload = spdyHeaderToHttp3StreamPayload(spdy_trailers_); + quic::QuicStreamFrame frame(stream_id_, true, offset, payload); + quic_stream_->OnStreamFrame(frame); } protected: @@ -185,33 +156,16 @@ class EnvoyQuicServerStreamTest : public testing::TestWithParam { EnvoyQuicServerStream* quic_stream_; Http::MockRequestDecoder stream_decoder_; Http::MockStreamCallbacks stream_callbacks_; - quic::QuicHeaderList request_headers_; spdy::SpdyHeaderBlock spdy_request_headers_; Http::TestResponseHeaderMapImpl response_headers_; Http::TestResponseTrailerMapImpl response_trailers_; - quic::QuicHeaderList trailers_; spdy::SpdyHeaderBlock spdy_trailers_; std::string host_{"www.abc.com"}; std::string request_body_{"Hello world"}; }; -INSTANTIATE_TEST_SUITE_P(EnvoyQuicServerStreamTests, EnvoyQuicServerStreamTest, - testing::ValuesIn({true, false})); - -TEST_P(EnvoyQuicServerStreamTest, GetRequestAndResponse) { - quic::QuicHeaderList request_headers; - request_headers.OnHeaderBlockStart(); - request_headers.OnHeader(":authority", host_); - request_headers.OnHeader(":method", "GET"); - request_headers.OnHeader(":path", "/"); - // QUICHE stack doesn't coalesce Cookie headers for header compression optimization. 
- request_headers.OnHeader("cookie", "a=b"); - request_headers.OnHeader("cookie", "c=d"); - request_headers.OnHeaderBlockEnd(/*uncompressed_header_bytes=*/0, - /*compressed_header_bytes=*/0); - - EXPECT_CALL(stream_decoder_, decodeHeaders_(_, /*end_stream=*/!quic::VersionUsesHttp3( - quic_version_.transport_version))) +TEST_F(EnvoyQuicServerStreamTest, GetRequestAndResponse) { + EXPECT_CALL(stream_decoder_, decodeHeaders_(_, /*end_stream=*/false)) .WillOnce(Invoke([this](const Http::RequestHeaderMapPtr& headers, bool) { EXPECT_EQ(host_, headers->getHostValue()); EXPECT_EQ("/", headers->getPathValue()); @@ -224,33 +178,28 @@ TEST_P(EnvoyQuicServerStreamTest, GetRequestAndResponse) { headers->get(Http::Headers::get().Cookie)[0]->value().getStringView()); } })); - if (quic::VersionUsesHttp3(quic_version_.transport_version)) { - EXPECT_CALL(stream_decoder_, decodeData(BufferStringEqual(""), /*end_stream=*/true)); - spdy::SpdyHeaderBlock spdy_headers; - spdy_headers[":authority"] = host_; - spdy_headers[":method"] = "GET"; - spdy_headers[":path"] = "/"; - spdy_headers.AppendValueOrAddHeader("cookie", "a=b"); - spdy_headers.AppendValueOrAddHeader("cookie", "c=d"); - std::string payload = spdyHeaderToHttp3StreamPayload(spdy_headers); - quic::QuicStreamFrame frame(stream_id_, true, 0, payload); - quic_stream_->OnStreamFrame(frame); - } else { - quic_stream_->OnStreamHeaderList(/*fin=*/true, request_headers.uncompressed_header_bytes(), - request_headers); - } + EXPECT_CALL(stream_decoder_, decodeData(BufferStringEqual(""), /*end_stream=*/true)); + spdy::SpdyHeaderBlock spdy_headers; + spdy_headers[":authority"] = host_; + spdy_headers[":method"] = "GET"; + spdy_headers[":path"] = "/"; + spdy_headers.AppendValueOrAddHeader("cookie", "a=b"); + spdy_headers.AppendValueOrAddHeader("cookie", "c=d"); + std::string payload = spdyHeaderToHttp3StreamPayload(spdy_headers); + quic::QuicStreamFrame frame(stream_id_, true, 0, payload); + quic_stream_->OnStreamFrame(frame); 
EXPECT_TRUE(quic_stream_->FinishedReadingHeaders()); quic_stream_->encodeHeaders(response_headers_, /*end_stream=*/true); } -TEST_P(EnvoyQuicServerStreamTest, PostRequestAndResponse) { +TEST_F(EnvoyQuicServerStreamTest, PostRequestAndResponse) { EXPECT_EQ(absl::nullopt, quic_stream_->http1StreamEncoderOptions()); receiveRequest(request_body_, true, request_body_.size() * 2); quic_stream_->encodeHeaders(response_headers_, /*end_stream=*/false); quic_stream_->encodeTrailers(response_trailers_); } -TEST_P(EnvoyQuicServerStreamTest, DecodeHeadersBodyAndTrailers) { +TEST_F(EnvoyQuicServerStreamTest, DecodeHeadersBodyAndTrailers) { size_t offset = receiveRequest(request_body_, false, request_body_.size() * 2); EXPECT_CALL(stream_decoder_, decodeTrailers_(_)) @@ -264,76 +213,53 @@ TEST_P(EnvoyQuicServerStreamTest, DecodeHeadersBodyAndTrailers) { EXPECT_CALL(stream_callbacks_, onResetStream(_, _)); } -TEST_P(EnvoyQuicServerStreamTest, OutOfOrderTrailers) { - EXPECT_CALL(stream_callbacks_, onResetStream(_, _)); - if (quic::VersionUsesHttp3(quic_version_.transport_version)) { - return; - } - EXPECT_CALL(stream_decoder_, decodeHeaders_(_, /*end_stream=*/false)) - .WillOnce(Invoke([this](const Http::RequestHeaderMapPtr& headers, bool) { - EXPECT_EQ(host_, headers->getHostValue()); - EXPECT_EQ("/", headers->getPathValue()); - EXPECT_EQ(Http::Headers::get().MethodValues.Post, headers->getMethodValue()); - })); - quic_stream_->OnStreamHeaderList(/*fin=*/false, request_headers_.uncompressed_header_bytes(), - request_headers_); - EXPECT_TRUE(quic_stream_->FinishedReadingHeaders()); - - // Trailer should be delivered to HCM later after body arrives. 
- receiveTrailers(request_body_.length()); - - quic::QuicStreamFrame frame(stream_id_, false, 0, request_body_); - EXPECT_CALL(stream_decoder_, decodeData(_, _)) - .WillOnce(Invoke([this](Buffer::Instance& buffer, bool finished_reading) { - EXPECT_EQ(request_body_, buffer.toString()); - EXPECT_FALSE(finished_reading); - })); - - EXPECT_CALL(stream_decoder_, decodeTrailers_(_)) - .WillOnce(Invoke([](const Http::RequestTrailerMapPtr& headers) { - Http::LowerCaseString key1("key1"); - Http::LowerCaseString key2(":final-offset"); - EXPECT_EQ("value1", headers->get(key1)[0]->value().getStringView()); - EXPECT_TRUE(headers->get(key2).empty()); - })); - quic_stream_->OnStreamFrame(frame); -} - -TEST_P(EnvoyQuicServerStreamTest, ResetStreamByHCM) { +TEST_F(EnvoyQuicServerStreamTest, ResetStreamByHCM) { receiveRequest(request_body_, false, request_body_.size() * 2); - if (quic::VersionUsesHttp3(quic_version_.transport_version)) { - EXPECT_CALL(quic_session_, MaybeSendStopSendingFrame(_, _)); - } + EXPECT_CALL(quic_session_, MaybeSendStopSendingFrame(_, _)); EXPECT_CALL(quic_session_, MaybeSendRstStreamFrame(_, _, _)); - EXPECT_CALL(stream_callbacks_, onResetStream(_, _)); - quic_stream_->resetStream(Http::StreamResetReason::LocalReset); + EXPECT_CALL(stream_callbacks_, + onResetStream(Http::StreamResetReason::LocalRefusedStreamReset, _)); + quic_stream_->resetStream(Http::StreamResetReason::LocalRefusedStreamReset); EXPECT_TRUE(quic_stream_->rst_sent()); } -TEST_P(EnvoyQuicServerStreamTest, EarlyResponseWithStopSending) { +TEST_F(EnvoyQuicServerStreamTest, ReceiveStopSending) { + size_t payload_offset = receiveRequest(request_body_, false, request_body_.size() * 2); + // Receiving STOP_SENDING alone should trigger upstream reset. 
+ EXPECT_CALL(stream_callbacks_, onResetStream(Http::StreamResetReason::RemoteReset, _)); + EXPECT_CALL(quic_session_, MaybeSendRstStreamFrame(_, _, _)); + quic_stream_->OnStopSending(quic::QUIC_STREAM_NO_ERROR); + EXPECT_FALSE(quic_stream_->read_side_closed()); + + // Following FIN should be discarded and the stream should be closed. + std::string second_part_request = bodyToHttp3StreamPayload("aaaa"); + EXPECT_CALL(stream_decoder_, decodeData(_, _)).Times(0u); + quic::QuicStreamFrame frame(stream_id_, true, payload_offset, second_part_request); + quic_stream_->OnStreamFrame(frame); + EXPECT_TRUE(quic_stream_->read_side_closed()); + EXPECT_TRUE(quic_stream_->write_side_closed()); +} + +TEST_F(EnvoyQuicServerStreamTest, EarlyResponseWithStopSending) { receiveRequest(request_body_, false, request_body_.size() * 2); // Write response headers with FIN before finish receiving request. quic_stream_->encodeHeaders(response_headers_, true); // Resetting the stream now means stop reading and sending QUIC_STREAM_NO_ERROR or STOP_SENDING. - if (quic::VersionUsesHttp3(quic_version_.transport_version)) { - EXPECT_CALL(quic_session_, MaybeSendStopSendingFrame(_, _)); - } else { - EXPECT_CALL(quic_session_, MaybeSendRstStreamFrame(_, _, _)); - } + EXPECT_CALL(quic_session_, MaybeSendStopSendingFrame(_, _)); EXPECT_CALL(stream_callbacks_, onResetStream(_, _)); quic_stream_->resetStream(Http::StreamResetReason::LocalReset); EXPECT_TRUE(quic_stream_->reading_stopped()); EXPECT_EQ(quic::QUIC_STREAM_NO_ERROR, quic_stream_->stream_error()); } -TEST_P(EnvoyQuicServerStreamTest, ReadDisableUponLargePost) { +TEST_F(EnvoyQuicServerStreamTest, ReadDisableUponLargePost) { std::string large_request(1024, 'a'); // Sending such large request will cause read to be disabled. size_t payload_offset = receiveRequest(large_request, false, 512); EXPECT_FALSE(quic_stream_->HasBytesToRead()); // Disable reading one more time. 
quic_stream_->readDisable(true); - std::string second_part_request = bodyToStreamPayload("bbb"); + std::string second_part_request = bodyToHttp3StreamPayload("bbb"); // Receiving more data in the same event loop will push the receiving pipe line. EXPECT_CALL(stream_decoder_, decodeData(_, _)) .WillOnce(Invoke([](Buffer::Instance& buffer, bool finished_reading) { @@ -350,7 +276,7 @@ TEST_P(EnvoyQuicServerStreamTest, ReadDisableUponLargePost) { dispatcher_->run(Event::Dispatcher::RunType::NonBlock); // This data frame should also be buffered. - std::string last_part_request = bodyToStreamPayload("ccc"); + std::string last_part_request = bodyToHttp3StreamPayload("ccc"); quic::QuicStreamFrame frame2(stream_id_, false, payload_offset, last_part_request); quic_stream_->OnStreamFrame(frame2); payload_offset += last_part_request.length(); @@ -375,18 +301,10 @@ TEST_P(EnvoyQuicServerStreamTest, ReadDisableUponLargePost) { } // Tests that readDisable() doesn't cause re-entry of OnBodyAvailable(). 
-TEST_P(EnvoyQuicServerStreamTest, ReadDisableAndReEnableImmediately) { - EXPECT_CALL(stream_decoder_, decodeHeaders_(_, /*end_stream=*/false)) - .WillOnce(Invoke([this](const Http::RequestHeaderMapPtr& headers, bool) { - EXPECT_EQ(host_, headers->getHostValue()); - EXPECT_EQ("/", headers->getPathValue()); - EXPECT_EQ(Http::Headers::get().MethodValues.Post, headers->getMethodValue()); - })); - quic_stream_->OnStreamHeaderList(/*fin=*/false, request_headers_.uncompressed_header_bytes(), - request_headers_); - EXPECT_TRUE(quic_stream_->FinishedReadingHeaders()); - +TEST_F(EnvoyQuicServerStreamTest, ReadDisableAndReEnableImmediately) { std::string payload(1024, 'a'); + size_t offset = receiveRequest(payload, false, 2048); + EXPECT_CALL(stream_decoder_, decodeData(_, _)) .WillOnce(Invoke([&](Buffer::Instance& buffer, bool finished_reading) { EXPECT_EQ(payload, buffer.toString()); @@ -395,14 +313,15 @@ TEST_P(EnvoyQuicServerStreamTest, ReadDisableAndReEnableImmediately) { // Re-enable reading should not trigger another decodeData. quic_stream_->readDisable(false); })); - std::string data = bodyToStreamPayload(payload); - quic::QuicStreamFrame frame(stream_id_, false, 0, data); + std::string data = bodyToHttp3StreamPayload(payload); + quic::QuicStreamFrame frame(stream_id_, false, offset, data); quic_stream_->OnStreamFrame(frame); + offset += data.length(); dispatcher_->run(Event::Dispatcher::RunType::NonBlock); // The stream shouldn't be blocked in the next event loop. 
- std::string last_part_request = bodyToStreamPayload("bbb"); - quic::QuicStreamFrame frame2(stream_id_, true, data.length(), last_part_request); + std::string last_part_request = bodyToHttp3StreamPayload("bbb"); + quic::QuicStreamFrame frame2(stream_id_, true, offset, last_part_request); EXPECT_CALL(stream_decoder_, decodeData(_, _)) .WillOnce(Invoke([&](Buffer::Instance& buffer, bool finished_reading) { EXPECT_EQ("bbb", buffer.toString()); @@ -415,43 +334,29 @@ TEST_P(EnvoyQuicServerStreamTest, ReadDisableAndReEnableImmediately) { EXPECT_CALL(stream_callbacks_, onResetStream(_, _)); } -TEST_P(EnvoyQuicServerStreamTest, ReadDisableUponHeaders) { +TEST_F(EnvoyQuicServerStreamTest, ReadDisableUponHeaders) { std::string payload(1024, 'a'); EXPECT_CALL(stream_decoder_, decodeHeaders_(_, /*end_stream=*/false)) .WillOnce(Invoke( [this](const Http::RequestHeaderMapPtr&, bool) { quic_stream_->readDisable(true); })); EXPECT_CALL(stream_decoder_, decodeData(_, _)); - if (quic::VersionUsesHttp3(quic_version_.transport_version)) { - std::string data = absl::StrCat(spdyHeaderToHttp3StreamPayload(spdy_request_headers_), - bodyToStreamPayload(payload)); - quic::QuicStreamFrame frame(stream_id_, false, 0, data); - quic_stream_->OnStreamFrame(frame); - EXPECT_TRUE(quic_stream_->FinishedReadingHeaders()); - } else { - quic_stream_->OnStreamHeaderList(/*fin=*/false, request_headers_.uncompressed_header_bytes(), - request_headers_); - EXPECT_TRUE(quic_stream_->FinishedReadingHeaders()); - - quic::QuicStreamFrame frame(stream_id_, false, 0, payload); - quic_stream_->OnStreamFrame(frame); - } + std::string data = absl::StrCat(spdyHeaderToHttp3StreamPayload(spdy_request_headers_), + bodyToHttp3StreamPayload(payload)); + quic::QuicStreamFrame frame(stream_id_, false, 0, data); + quic_stream_->OnStreamFrame(frame); + EXPECT_TRUE(quic_stream_->FinishedReadingHeaders()); // Stream should be blocked in the next event loop. 
dispatcher_->run(Event::Dispatcher::RunType::NonBlock); // Receiving more date shouldn't trigger decoding. EXPECT_CALL(stream_decoder_, decodeData(_, _)).Times(0); - if (quic::VersionUsesHttp3(quic_version_.transport_version)) { - std::string data = bodyToStreamPayload(payload); - quic::QuicStreamFrame frame(stream_id_, false, 0, data); - quic_stream_->OnStreamFrame(frame); - } else { - quic::QuicStreamFrame frame(stream_id_, false, 0, payload); - quic_stream_->OnStreamFrame(frame); - } + data = bodyToHttp3StreamPayload(payload); + quic::QuicStreamFrame frame2(stream_id_, false, 0, data); + quic_stream_->OnStreamFrame(frame2); EXPECT_CALL(stream_callbacks_, onResetStream(_, _)); } -TEST_P(EnvoyQuicServerStreamTest, ReadDisableUponTrailers) { +TEST_F(EnvoyQuicServerStreamTest, ReadDisableUponTrailers) { size_t payload_offset = receiveRequest(request_body_, false, request_body_.length() * 2); EXPECT_FALSE(quic_stream_->HasBytesToRead()); @@ -466,7 +371,7 @@ TEST_P(EnvoyQuicServerStreamTest, ReadDisableUponTrailers) { // Tests that the stream with a send buffer whose high limit is 16k and low // limit is 8k sends over 32kB response. 
-TEST_P(EnvoyQuicServerStreamTest, WatermarkSendBuffer) { +TEST_F(EnvoyQuicServerStreamTest, WatermarkSendBuffer) { receiveRequest(request_body_, true, request_body_.size() * 2); // Bump connection flow control window large enough not to cause connection @@ -523,12 +428,7 @@ TEST_P(EnvoyQuicServerStreamTest, WatermarkSendBuffer) { EXPECT_TRUE(quic_stream_->write_side_closed()); } -TEST_P(EnvoyQuicServerStreamTest, HeadersContributeToWatermarkIquic) { - if (!quic::VersionUsesHttp3(quic_version_.transport_version)) { - EXPECT_CALL(stream_callbacks_, onResetStream(_, _)); - return; - } - +TEST_F(EnvoyQuicServerStreamTest, HeadersContributeToWatermarkIquic) { receiveRequest(request_body_, true, request_body_.size() * 2); // Bump connection flow control window large enough not to cause connection level flow control @@ -596,78 +496,51 @@ TEST_P(EnvoyQuicServerStreamTest, HeadersContributeToWatermarkIquic) { quic_stream_->encodeTrailers(response_trailers_); } -TEST_P(EnvoyQuicServerStreamTest, RequestHeaderTooLarge) { +TEST_F(EnvoyQuicServerStreamTest, RequestHeaderTooLarge) { // Bump stream flow control window to allow request headers larger than 16K. quic::QuicWindowUpdateFrame window_update1(quic::kInvalidControlFrameId, quic_stream_->id(), 32 * 1024); quic_stream_->OnWindowUpdateFrame(window_update1); - if (quic::VersionUsesHttp3(quic_version_.transport_version)) { - EXPECT_CALL(quic_session_, MaybeSendStopSendingFrame(_, _)); - } + EXPECT_CALL(quic_session_, MaybeSendStopSendingFrame(_, _)); EXPECT_CALL(quic_session_, MaybeSendRstStreamFrame(_, _, _)); EXPECT_CALL(stream_callbacks_, onResetStream(Http::StreamResetReason::LocalReset, _)); - if (quic::VersionUsesHttp3(quic_version_.transport_version)) { - spdy::SpdyHeaderBlock spdy_headers; - spdy_headers[":authority"] = host_; - spdy_headers[":method"] = "POST"; - spdy_headers[":path"] = "/"; - // This header exceeds max header size limit and should cause stream reset. 
- spdy_headers["long_header"] = std::string(16 * 1024 + 1, 'a'); - std::string payload = absl::StrCat(spdyHeaderToHttp3StreamPayload(spdy_headers), - bodyToStreamPayload(request_body_)); - quic::QuicStreamFrame frame(stream_id_, false, 0, payload); - quic_stream_->OnStreamFrame(frame); - } else { - quic::QuicHeaderList request_headers; - request_headers.set_max_header_list_size(16 * 1024); - request_headers.OnHeaderBlockStart(); - request_headers.OnHeader(":authority", host_); - request_headers.OnHeader(":method", "POST"); - request_headers.OnHeader(":path", "/"); - request_headers.OnHeader("long_header", std::string(16 * 1024 + 1, 'a')); - request_headers.OnHeaderBlockEnd(/*uncompressed_header_bytes=*/0, - /*compressed_header_bytes=*/0); - quic_stream_->OnStreamHeaderList(/*fin=*/false, request_headers.uncompressed_header_bytes(), - request_headers); - } + spdy::SpdyHeaderBlock spdy_headers; + spdy_headers[":authority"] = host_; + spdy_headers[":method"] = "POST"; + spdy_headers[":path"] = "/"; + // This header exceeds max header size limit and should cause stream reset. + spdy_headers["long_header"] = std::string(16 * 1024 + 1, 'a'); + std::string payload = absl::StrCat(spdyHeaderToHttp3StreamPayload(spdy_headers), + bodyToHttp3StreamPayload(request_body_)); + quic::QuicStreamFrame frame(stream_id_, false, 0, payload); + quic_stream_->OnStreamFrame(frame); + EXPECT_TRUE(quic_stream_->rst_sent()); } -TEST_P(EnvoyQuicServerStreamTest, RequestTrailerTooLarge) { +TEST_F(EnvoyQuicServerStreamTest, RequestTrailerTooLarge) { // Bump stream flow control window to allow request headers larger than 16K. 
quic::QuicWindowUpdateFrame window_update1(quic::kInvalidControlFrameId, quic_stream_->id(), 20 * 1024); size_t offset = receiveRequest(request_body_, false, request_body_.size() * 2); quic_stream_->OnWindowUpdateFrame(window_update1); - if (quic::VersionUsesHttp3(quic_version_.transport_version)) { - EXPECT_CALL(quic_session_, MaybeSendStopSendingFrame(_, _)); - } + EXPECT_CALL(quic_session_, MaybeSendStopSendingFrame(_, _)); EXPECT_CALL(quic_session_, MaybeSendRstStreamFrame(_, _, _)); EXPECT_CALL(stream_callbacks_, onResetStream(Http::StreamResetReason::LocalReset, _)); - if (quic::VersionUsesHttp3(quic_version_.transport_version)) { - spdy::SpdyHeaderBlock spdy_trailers; - // This header exceeds max header size limit and should cause stream reset. - spdy_trailers["long_header"] = std::string(16 * 1024 + 1, 'a'); - std::string payload = spdyHeaderToHttp3StreamPayload(spdy_trailers); - quic::QuicStreamFrame frame(stream_id_, false, offset, payload); - quic_stream_->OnStreamFrame(frame); - } else { - quic::QuicHeaderList spdy_trailers; - spdy_trailers.set_max_header_list_size(16 * 1024); - spdy_trailers.OnHeaderBlockStart(); - spdy_trailers.OnHeader("long_header", std::string(16 * 1024 + 1, 'a')); - spdy_trailers.OnHeaderBlockEnd(/*uncompressed_header_bytes=*/0, - /*compressed_header_bytes=*/0); - quic_stream_->OnStreamHeaderList(/*fin=*/true, spdy_trailers.uncompressed_header_bytes(), - spdy_trailers); - } + spdy::SpdyHeaderBlock spdy_trailers; + // This header exceeds max header size limit and should cause stream reset. + spdy_trailers["long_header"] = std::string(16 * 1024 + 1, 'a'); + std::string payload = spdyHeaderToHttp3StreamPayload(spdy_trailers); + quic::QuicStreamFrame frame(stream_id_, false, offset, payload); + quic_stream_->OnStreamFrame(frame); + EXPECT_TRUE(quic_stream_->rst_sent()); } // Tests that closing connection is QUICHE write call stack doesn't mess up // watermark buffer accounting. 
-TEST_P(EnvoyQuicServerStreamTest, ConnectionCloseDuringEncoding) { +TEST_F(EnvoyQuicServerStreamTest, ConnectionCloseDuringEncoding) { receiveRequest(request_body_, true, request_body_.size() * 2); quic_stream_->encodeHeaders(response_headers_, /*end_stream=*/false); std::string response(16 * 1024 + 1, 'a'); @@ -708,7 +581,7 @@ TEST_P(EnvoyQuicServerStreamTest, ConnectionCloseDuringEncoding) { // Tests that after end_stream is encoded, closing connection shouldn't call // onResetStream() callbacks. -TEST_P(EnvoyQuicServerStreamTest, ConnectionCloseAfterEndStreamEncoded) { +TEST_F(EnvoyQuicServerStreamTest, ConnectionCloseAfterEndStreamEncoded) { receiveRequest(request_body_, true, request_body_.size() * 2); EXPECT_CALL(quic_connection_, SendConnectionClosePacket(_, quic::NO_IETF_QUIC_ERROR, "Closed in WriteHeaders")); @@ -725,7 +598,7 @@ TEST_P(EnvoyQuicServerStreamTest, ConnectionCloseAfterEndStreamEncoded) { quic_stream_->encodeHeaders(response_headers_, /*end_stream=*/true); } -TEST_P(EnvoyQuicServerStreamTest, MetadataNotSupported) { +TEST_F(EnvoyQuicServerStreamTest, MetadataNotSupported) { Http::MetadataMap metadata_map = {{"key", "value"}}; Http::MetadataMapPtr metadata_map_ptr = std::make_unique(metadata_map); Http::MetadataMapVector metadata_map_vector; diff --git a/test/common/quic/envoy_quic_utils_test.cc b/test/common/quic/envoy_quic_utils_test.cc index 20a1dfe989690..63d9393e42c0e 100644 --- a/test/common/quic/envoy_quic_utils_test.cc +++ b/test/common/quic/envoy_quic_utils_test.cc @@ -19,6 +19,7 @@ #include "gtest/gtest.h" using testing::_; +using testing::NiceMock; using testing::Return; namespace Envoy { @@ -43,13 +44,14 @@ TEST(EnvoyQuicUtilsTest, ConversionBetweenQuicAddressAndEnvoyAddress) { EXPECT_EQ(quic_addr.ToString(), envoy_addr->asStringView()); EXPECT_EQ(quic_addr, envoyIpAddressToQuicSocketAddress(envoy_addr->ip())); } + EXPECT_FALSE(envoyIpAddressToQuicSocketAddress(nullptr).IsInitialized()); } class MockHeaderValidator : public 
HeaderValidator { public: ~MockHeaderValidator() override = default; MOCK_METHOD(Http::HeaderUtility::HeaderValidationResult, validateHeader, - (const std::string& header_name, absl::string_view header_value)); + (absl::string_view header_name, absl::string_view header_value)); }; TEST(EnvoyQuicUtilsTest, HeadersConversion) { @@ -61,7 +63,11 @@ TEST(EnvoyQuicUtilsTest, HeadersConversion) { // converting to Envoy headers. headers_block.AppendValueOrAddHeader("key", "value1"); headers_block.AppendValueOrAddHeader("key", "value2"); - auto envoy_headers = spdyHeaderBlockToEnvoyHeaders(headers_block); + NiceMock validator; + absl::string_view details; + quic::QuicRstStreamErrorCode rst = quic::QUIC_REFUSED_STREAM; + auto envoy_headers = spdyHeaderBlockToEnvoyTrailers( + headers_block, 100, validator, details, rst); // Envoy header block is 1 header larger because QUICHE header block does coalescing. EXPECT_EQ(headers_block.size() + 1u, envoy_headers->size()); EXPECT_EQ("www.google.com", envoy_headers->getHostValue()); @@ -69,6 +75,7 @@ TEST(EnvoyQuicUtilsTest, HeadersConversion) { EXPECT_EQ("https", envoy_headers->getSchemeValue()); EXPECT_EQ("value1", envoy_headers->get(Http::LowerCaseString("key"))[0]->value().getStringView()); EXPECT_EQ("value2", envoy_headers->get(Http::LowerCaseString("key"))[1]->value().getStringView()); + EXPECT_EQ(rst, quic::QUIC_REFUSED_STREAM); // With no error it will be untouched. 
quic::QuicHeaderList quic_headers; quic_headers.OnHeaderBlockStart(); @@ -79,18 +86,17 @@ TEST(EnvoyQuicUtilsTest, HeadersConversion) { quic_headers.OnHeader("key", "value2"); quic_headers.OnHeader("key-to-drop", ""); quic_headers.OnHeaderBlockEnd(0, 0); - MockHeaderValidator validator; EXPECT_CALL(validator, validateHeader(_, _)) - .WillRepeatedly([](const std::string& header_name, absl::string_view) { + .WillRepeatedly([](absl::string_view header_name, absl::string_view) { if (header_name == "key-to-drop") { return Http::HeaderUtility::HeaderValidationResult::DROP; } return Http::HeaderUtility::HeaderValidationResult::ACCEPT; }); - absl::string_view details; - auto envoy_headers2 = - quicHeadersToEnvoyHeaders(quic_headers, validator, 100, details); + auto envoy_headers2 = quicHeadersToEnvoyHeaders( + quic_headers, validator, 100, details, rst); EXPECT_EQ(*envoy_headers, *envoy_headers2); + EXPECT_EQ(rst, quic::QUIC_REFUSED_STREAM); // With no error it will be untouched. quic::QuicHeaderList quic_headers2; quic_headers2.OnHeaderBlockStart(); @@ -100,14 +106,51 @@ TEST(EnvoyQuicUtilsTest, HeadersConversion) { quic_headers2.OnHeader("invalid_key", ""); quic_headers2.OnHeaderBlockEnd(0, 0); EXPECT_CALL(validator, validateHeader(_, _)) - .WillRepeatedly([](const std::string& header_name, absl::string_view) { + .WillRepeatedly([](absl::string_view header_name, absl::string_view) { if (header_name == "invalid_key") { return Http::HeaderUtility::HeaderValidationResult::REJECT; } return Http::HeaderUtility::HeaderValidationResult::ACCEPT; }); EXPECT_EQ(nullptr, quicHeadersToEnvoyHeaders(quic_headers2, validator, - 100, details)); + 100, details, rst)); + EXPECT_EQ(rst, quic::QUIC_BAD_APPLICATION_PAYLOAD); +} + +TEST(EnvoyQuicUtilsTest, HeadersSizeBounds) { + spdy::SpdyHeaderBlock headers_block; + headers_block[":authority"] = "www.google.com"; + headers_block[":path"] = "/index.hml"; + headers_block[":scheme"] = "https"; + headers_block["foo"] = 
std::string("bar\0eep\0baz", 11); + absl::string_view details; + // 6 headers are allowed. + NiceMock validator; + quic::QuicRstStreamErrorCode rst = quic::QUIC_REFUSED_STREAM; + EXPECT_NE(nullptr, spdyHeaderBlockToEnvoyTrailers( + headers_block, 6, validator, details, rst)); + // Given the cap is 6, make sure anything lower, exact or otherwise, is rejected. + EXPECT_EQ(nullptr, spdyHeaderBlockToEnvoyTrailers( + headers_block, 5, validator, details, rst)); + EXPECT_EQ("http3.too_many_trailers", details); + EXPECT_EQ(nullptr, spdyHeaderBlockToEnvoyTrailers( + headers_block, 4, validator, details, rst)); + EXPECT_EQ(rst, quic::QUIC_STREAM_EXCESSIVE_LOAD); +} + +TEST(EnvoyQuicUtilsTest, TrailerCharacters) { + spdy::SpdyHeaderBlock headers_block; + headers_block[":authority"] = "www.google.com"; + headers_block[":path"] = "/index.hml"; + headers_block[":scheme"] = "https"; + absl::string_view details; + NiceMock validator; + EXPECT_CALL(validator, validateHeader(_, _)) + .WillRepeatedly(Return(Http::HeaderUtility::HeaderValidationResult::REJECT)); + quic::QuicRstStreamErrorCode rst = quic::QUIC_REFUSED_STREAM; + EXPECT_EQ(nullptr, spdyHeaderBlockToEnvoyTrailers( + headers_block, 5, validator, details, rst)); + EXPECT_EQ(rst, quic::QUIC_BAD_APPLICATION_PAYLOAD); } } // namespace Quic diff --git a/test/common/quic/envoy_quic_writer_test.cc b/test/common/quic/envoy_quic_writer_test.cc index a0ee8b3ed5edd..3908fb82ba568 100644 --- a/test/common/quic/envoy_quic_writer_test.cc +++ b/test/common/quic/envoy_quic_writer_test.cc @@ -5,16 +5,17 @@ #include "source/common/network/address_impl.h" #include "source/common/network/io_socket_error_impl.h" +#include "source/common/network/io_socket_handle_impl.h" #include "source/common/network/udp_packet_writer_handler_impl.h" #include "source/common/quic/envoy_quic_packet_writer.h" #include "test/mocks/api/mocks.h" -#include "test/mocks/network/mocks.h" #include "test/test_common/threadsafe_singleton_injector.h" #include 
"gmock/gmock.h" #include "gtest/gtest.h" +using testing::_; using testing::Return; namespace Envoy { @@ -23,7 +24,7 @@ namespace Quic { class EnvoyQuicWriterTest : public ::testing::Test { public: EnvoyQuicWriterTest() - : envoy_quic_writer_(std::make_unique(socket_.ioHandle())) { + : envoy_quic_writer_(std::make_unique(io_handle_)) { self_address_.FromString("::"); quic::QuicIpAddress peer_ip; peer_ip.FromString("::1"); @@ -50,7 +51,7 @@ class EnvoyQuicWriterTest : public ::testing::Test { protected: testing::NiceMock os_sys_calls_; TestThreadsafeSingletonInjector os_calls_{&os_sys_calls_}; - testing::NiceMock socket_; + Network::IoSocketHandleImpl io_handle_; quic::QuicIpAddress self_address_; quic::QuicSocketAddress peer_address_; EnvoyQuicPacketWriter envoy_quic_writer_; diff --git a/test/common/quic/platform/BUILD b/test/common/quic/platform/BUILD index c7031156309e9..e45df81e2704d 100644 --- a/test/common/quic/platform/BUILD +++ b/test/common/quic/platform/BUILD @@ -17,7 +17,7 @@ envoy_cc_test( "//source/common/quic/platform:quiche_flags_impl_lib", "//test/test_common:logging_lib", "//test/test_common:utility_lib", - "@com_googlesource_quiche//:http2_test_tools_random", + "@com_github_google_quiche//:http2_test_tools_random", ], ) @@ -45,19 +45,18 @@ envoy_cc_test( "//test/test_common:logging_lib", "//test/test_common:threadsafe_singleton_injector_lib", "//test/test_common:utility_lib", - "@com_googlesource_quiche//:epoll_server_lib", - "@com_googlesource_quiche//:quic_core_buffer_allocator_lib", - "@com_googlesource_quiche//:quic_core_error_codes_lib", - "@com_googlesource_quiche//:quic_core_types_lib", - "@com_googlesource_quiche//:quic_platform_expect_bug", - "@com_googlesource_quiche//:quic_platform_mem_slice_span", - "@com_googlesource_quiche//:quic_platform_mem_slice_storage", - "@com_googlesource_quiche//:quic_platform_mock_log", - "@com_googlesource_quiche//:quic_platform_sleep", - "@com_googlesource_quiche//:quic_platform_system_event_loop", - 
"@com_googlesource_quiche//:quic_platform_test", - "@com_googlesource_quiche//:quic_platform_test_output", - "@com_googlesource_quiche//:quic_platform_thread", + "@com_github_google_quiche//:epoll_server_lib", + "@com_github_google_quiche//:quic_core_buffer_allocator_lib", + "@com_github_google_quiche//:quic_core_error_codes_lib", + "@com_github_google_quiche//:quic_core_types_lib", + "@com_github_google_quiche//:quic_platform_expect_bug", + "@com_github_google_quiche//:quic_platform_mem_slice_storage", + "@com_github_google_quiche//:quic_platform_mock_log", + "@com_github_google_quiche//:quic_platform_sleep", + "@com_github_google_quiche//:quic_platform_system_event_loop", + "@com_github_google_quiche//:quic_platform_test", + "@com_github_google_quiche//:quic_platform_test_output", + "@com_github_google_quiche//:quic_platform_thread", ], ) @@ -96,9 +95,9 @@ envoy_cc_test_library( }), tags = ["nofips"], deps = [ - "@com_googlesource_quiche//:quic_core_clock_lib", - "@com_googlesource_quiche//:quic_platform", - "@com_googlesource_quiche//:quic_platform_epoll_lib", + "@com_github_google_quiche//:quic_core_clock_lib", + "@com_github_google_quiche//:quic_platform", + "@com_github_google_quiche//:quic_platform_epoll_lib", ], ) @@ -106,7 +105,7 @@ envoy_cc_test_library( name = "quic_platform_epoll_impl_lib", hdrs = ["quic_epoll_impl.h"], tags = ["nofips"], - deps = ["@com_googlesource_quiche//:epoll_server_lib"], + deps = ["@com_github_google_quiche//:epoll_server_lib"], ) envoy_cc_test_library( @@ -114,8 +113,8 @@ envoy_cc_test_library( hdrs = ["quic_expect_bug_impl.h"], tags = ["nofips"], deps = [ - "@com_googlesource_quiche//:quic_platform_base", - "@com_googlesource_quiche//:quic_platform_mock_log", + "@com_github_google_quiche//:quic_platform_base", + "@com_github_google_quiche//:quic_platform_mock_log", ], ) @@ -123,17 +122,7 @@ envoy_cc_test_library( name = "quic_platform_mock_log_impl_lib", hdrs = ["quic_mock_log_impl.h"], tags = ["nofips"], - deps = 
["@com_googlesource_quiche//:quic_platform_base"], -) - -envoy_cc_test_library( - name = "quic_platform_test_mem_slice_vector_impl_lib", - hdrs = ["quic_test_mem_slice_vector_impl.h"], - tags = ["nofips"], - deps = [ - "//envoy/buffer:buffer_interface", - "@com_googlesource_quiche//:quic_platform_mem_slice_span", - ], + deps = ["@com_github_google_quiche//:quic_platform_base"], ) envoy_cc_test_library( @@ -167,8 +156,8 @@ envoy_cc_test_library( tags = ["nofips"], deps = [ "//test/test_common:file_system_for_test_lib", - "@com_googlesource_quiche//:quic_platform_base", - "@com_googlesource_quiche//:quiche_common_platform", + "@com_github_google_quiche//:quic_platform_base", + "@com_github_google_quiche//:quiche_common_platform", ], ) diff --git a/test/common/quic/platform/quic_platform_test.cc b/test/common/quic/platform/quic_platform_test.cc index 73d13b22a733d..d0ec8aceb95ca 100644 --- a/test/common/quic/platform/quic_platform_test.cc +++ b/test/common/quic/platform/quic_platform_test.cc @@ -37,7 +37,6 @@ #include "quiche/quic/platform/api/quic_hostname_utils.h" #include "quiche/quic/platform/api/quic_logging.h" #include "quiche/quic/platform/api/quic_mem_slice.h" -#include "quiche/quic/platform/api/quic_mem_slice_span.h" #include "quiche/quic/platform/api/quic_mem_slice_storage.h" #include "quiche/quic/platform/api/quic_mock_log.h" #include "quiche/quic/platform/api/quic_mutex.h" @@ -625,46 +624,5 @@ TEST(EnvoyQuicMemSliceTest, ConstructMemSliceFromBuffer) { EXPECT_TRUE(fragment_releaser_called); } -TEST(EnvoyQuicMemSliceTest, ConstructQuicMemSliceSpan) { - Envoy::Buffer::OwnedImpl buffer; - std::string str(1024, 'a'); - buffer.add(str); - quic::QuicMemSlice slice{quic::QuicMemSliceImpl(buffer, str.length())}; - - QuicMemSliceSpan span(&slice); - EXPECT_EQ(1024u, span.total_length()); - EXPECT_EQ(str, span.GetData(0)); - span.ConsumeAll([](quic::QuicMemSlice&& mem_slice) { mem_slice.Reset(); }); - EXPECT_EQ(0u, span.total_length()); - - QuicMemSlice slice3; - { - 
quic::QuicMemSlice slice2{quic::QuicMemSliceImpl(std::make_unique(5), 5u)}; - - QuicMemSliceSpan span2(&slice2); - EXPECT_EQ(5u, span2.total_length()); - span2.ConsumeAll([&slice3](quic::QuicMemSlice&& mem_slice) { slice3 = std::move(mem_slice); }); - EXPECT_EQ(0u, span2.total_length()); - } - slice3.Reset(); -} - -TEST(EnvoyQuicMemSliceTest, QuicMemSliceStorage) { - std::string str(512, 'a'); - iovec iov = {const_cast(str.data()), str.length()}; - SimpleBufferAllocator allocator; - QuicMemSliceStorage storage(&iov, 1, &allocator, 1024); - // Test copy constructor. - QuicMemSliceStorage other = storage; - QuicMemSliceSpan span = storage.ToSpan(); - EXPECT_EQ(1u, span.NumSlices()); - EXPECT_EQ(str.length(), span.total_length()); - EXPECT_EQ(str, span.GetData(0)); - QuicMemSliceSpan span_other = other.ToSpan(); - EXPECT_EQ(1u, span_other.NumSlices()); - EXPECT_EQ(str, span_other.GetData(0)); - EXPECT_NE(span_other.GetData(0).data(), span.GetData(0).data()); -} - } // namespace } // namespace quic diff --git a/test/common/quic/platform/quic_test_mem_slice_vector_impl.h b/test/common/quic/platform/quic_test_mem_slice_vector_impl.h deleted file mode 100644 index 1f727111d2563..0000000000000 --- a/test/common/quic/platform/quic_test_mem_slice_vector_impl.h +++ /dev/null @@ -1,35 +0,0 @@ -#pragma once - -// NOLINT(namespace-envoy) -// -// This file is part of the QUICHE platform implementation, and is not to be -// consumed or referenced directly by other Envoy code. It serves purely as a -// porting layer for QUICHE. 
- -#include "source/common/buffer/buffer_impl.h" -#include "source/common/quic/platform/quic_mem_slice_span_impl.h" - -namespace quic { -namespace test { - -class QuicTestMemSliceVectorImpl { -public: - explicit QuicTestMemSliceVectorImpl(std::vector> buffers) { - for (auto it : buffers) { - auto fragment = new Envoy::Buffer::BufferFragmentImpl( - it.first, it.second, - [](const void*, size_t, const Envoy::Buffer::BufferFragmentImpl* fragment) { - delete fragment; - }); - buffer_.addBufferFragment(*fragment); - } - } - - QuicMemSliceSpanImpl span() { return QuicMemSliceSpanImpl(buffer_); } - -private: - Envoy::Buffer::OwnedImpl buffer_; -}; - -} // namespace test -} // namespace quic diff --git a/test/common/quic/platform/quiche_test_impl.h b/test/common/quic/platform/quiche_test_impl.h index 8362180983c5a..8bd47699341dd 100644 --- a/test/common/quic/platform/quiche_test_impl.h +++ b/test/common/quic/platform/quiche_test_impl.h @@ -20,7 +20,7 @@ template using QuicheTestWithParamImpl = ::testing::TestWithParam; // NOLINTNEXTLINE(readability-identifier-naming) inline std::string QuicheGetCommonSourcePathImpl() { std::string test_srcdir(getenv("TEST_SRCDIR")); - return absl::StrCat(test_srcdir, "/external/com_googlesource_quiche/quiche/common"); + return absl::StrCat(test_srcdir, "/external/com_github_google_quiche/quiche/common"); } } // namespace test diff --git a/test/common/quic/quic_io_handle_wrapper_test.cc b/test/common/quic/quic_io_handle_wrapper_test.cc index 899f7903d5892..e9c7d566207ed 100644 --- a/test/common/quic/quic_io_handle_wrapper_test.cc +++ b/test/common/quic/quic_io_handle_wrapper_test.cc @@ -4,6 +4,7 @@ #include "envoy/common/platform.h" #include "source/common/network/address_impl.h" +#include "source/common/network/io_socket_handle_impl.h" #include "source/common/quic/quic_io_handle_wrapper.h" #include "test/mocks/api/mocks.h" @@ -16,19 +17,23 @@ using testing::ByMove; using testing::Return; +using testing::ReturnRef; namespace Envoy { 
namespace Quic { class QuicIoHandleWrapperTest : public testing::Test { public: - QuicIoHandleWrapperTest() : wrapper_(std::make_unique(socket_.ioHandle())) { + QuicIoHandleWrapperTest() { + real_io_handle_ = std::make_unique(); + ON_CALL(socket_, ioHandle()).WillByDefault(ReturnRef(*real_io_handle_)); + wrapper_ = std::make_unique(socket_.ioHandle()); EXPECT_TRUE(wrapper_->isOpen()); EXPECT_FALSE(socket_.ioHandle().isOpen()); } - ~QuicIoHandleWrapperTest() override = default; protected: + Network::IoHandlePtr real_io_handle_; testing::NiceMock socket_; std::unique_ptr wrapper_; testing::StrictMock os_sys_calls_; diff --git a/test/common/quic/test_proof_verifier.h b/test/common/quic/test_proof_verifier.h index 3a0596a87b310..8546f92fd37ad 100644 --- a/test/common/quic/test_proof_verifier.h +++ b/test/common/quic/test_proof_verifier.h @@ -5,7 +5,7 @@ namespace Envoy { namespace Quic { -// A test quic::ProofVerifier which always approves the certs and signature. +// A test quic::ProofVerifier which always approves the certs. 
class TestProofVerifier : public EnvoyQuicProofVerifierBase { public: // quic::ProofVerifier @@ -18,14 +18,6 @@ class TestProofVerifier : public EnvoyQuicProofVerifierBase { std::unique_ptr /*callback*/) override { return quic::QUIC_SUCCESS; } - -protected: - // EnvoyQuicProofVerifierBase - bool verifySignature(const std::string& /*server_config*/, absl::string_view /*chlo_hash*/, - const std::string& /*cert*/, const std::string& /*signature*/, - std::string* /*error_details*/) override { - return true; - } }; } // namespace Quic diff --git a/test/common/quic/test_utils.h b/test/common/quic/test_utils.h index 192f0d4465c65..3999d237c1ff6 100644 --- a/test/common/quic/test_utils.h +++ b/test/common/quic/test_utils.h @@ -102,7 +102,7 @@ class MockEnvoyQuicSession : public quic::QuicSpdySession, public QuicFilterMana MOCK_METHOD(quic::QuicConsumedData, WritevData, (quic::QuicStreamId id, size_t write_length, quic::QuicStreamOffset offset, quic::StreamSendingState state, quic::TransmissionType type, - absl::optional level)); + quic::EncryptionLevel level)); MOCK_METHOD(bool, ShouldYield, (quic::QuicStreamId id)); MOCK_METHOD(void, MaybeSendRstStreamFrame, (quic::QuicStreamId id, quic::QuicRstStreamErrorCode error, @@ -188,7 +188,7 @@ class MockEnvoyQuicClientSession : public EnvoyQuicClientSession { MOCK_METHOD(quic::QuicConsumedData, WritevData, (quic::QuicStreamId id, size_t write_length, quic::QuicStreamOffset offset, quic::StreamSendingState state, quic::TransmissionType type, - absl::optional level)); + quic::EncryptionLevel level)); MOCK_METHOD(bool, ShouldYield, (quic::QuicStreamId id)); MOCK_METHOD(void, dumpState, (std::ostream&, int), (const)); @@ -210,44 +210,12 @@ class MockEnvoyQuicClientSession : public EnvoyQuicClientSession { QuicStatNames quic_stat_names_{stats_store_.symbolTable()}; }; -Buffer::OwnedImpl -generateChloPacketToSend(quic::ParsedQuicVersion quic_version, quic::QuicConfig& quic_config, - quic::QuicCryptoServerConfig& crypto_config, - 
quic::QuicConnectionId connection_id, quic::QuicClock& clock, - const quic::QuicSocketAddress& server_address, - const quic::QuicSocketAddress& client_address, std::string sni) { - if (quic_version.UsesTls()) { - std::unique_ptr packet = - std::move(quic::test::GetFirstFlightOfPackets(quic_version, quic_config, connection_id)[0]); - return Buffer::OwnedImpl(packet->data(), packet->length()); - } - quic::CryptoHandshakeMessage chlo = quic::test::crypto_test_utils::GenerateDefaultInchoateCHLO( - &clock, quic_version.transport_version, &crypto_config); - chlo.SetVector(quic::kCOPT, quic::QuicTagVector{quic::kREJ}); - chlo.SetStringPiece(quic::kSNI, sni); - quic::CryptoHandshakeMessage full_chlo; - quic::QuicReferenceCountedPointer signed_config( - new quic::QuicSignedServerConfig); - quic::QuicCompressedCertsCache cache( - quic::QuicCompressedCertsCache::kQuicCompressedCertsCacheSize); - quic::test::crypto_test_utils::GenerateFullCHLO(chlo, &crypto_config, server_address, - client_address, quic_version.transport_version, - &clock, signed_config, &cache, &full_chlo); - // Overwrite version label to the version passed in. 
- full_chlo.SetVersion(quic::kVER, quic_version); - quic::QuicConfig quic_config_tmp; - quic_config_tmp.ToHandshakeMessage(&full_chlo, quic_version.transport_version); - - std::string packet_content(full_chlo.GetSerialized().AsStringPiece()); - quic::ParsedQuicVersionVector supported_versions{quic_version}; - auto encrypted_packet = - std::unique_ptr(quic::test::ConstructEncryptedPacket( - connection_id, quic::EmptyQuicConnectionId(), - /*version_flag=*/true, /*reset_flag*/ false, - /*packet_number=*/1, packet_content, quic::CONNECTION_ID_PRESENT, - quic::CONNECTION_ID_ABSENT, quic::PACKET_4BYTE_PACKET_NUMBER, &supported_versions)); - - return Buffer::OwnedImpl(encrypted_packet->data(), encrypted_packet->length()); +Buffer::OwnedImpl generateChloPacketToSend(quic::ParsedQuicVersion quic_version, + quic::QuicConfig& quic_config, + quic::QuicConnectionId connection_id) { + std::unique_ptr packet = + std::move(quic::test::GetFirstFlightOfPackets(quic_version, quic_config, connection_id)[0]); + return Buffer::OwnedImpl(packet->data(), packet->length()); } void setQuicConfigWithDefaultValues(quic::QuicConfig* config) { @@ -265,12 +233,6 @@ void setQuicConfigWithDefaultValues(quic::QuicConfig* config) { config, quic::kMinimumFlowControlSendWindow); } -enum class QuicVersionType { - GquicQuicCrypto, - GquicTls, - Iquic, -}; - std::string spdyHeaderToHttp3StreamPayload(const spdy::SpdyHeaderBlock& header) { quic::test::NoopQpackStreamSenderDelegate encoder_stream_sender_delegate; quic::test::NoopDecoderStreamErrorDelegate decoder_stream_error_delegate; @@ -293,33 +255,25 @@ std::string bodyToHttp3StreamPayload(const std::string& body) { } // A test suite with variation of ip version and a knob to turn on/off IETF QUIC implementation. 
-class QuicMultiVersionTest - : public testing::TestWithParam> {}; +class QuicMultiVersionTest : public testing::TestWithParam< + std::pair> { +}; -std::vector> generateTestParam() { - std::vector> param; +std::vector> generateTestParam() { + std::vector> param; for (auto ip_version : TestEnvironment::getIpVersionsForTest()) { - param.emplace_back(ip_version, QuicVersionType::GquicQuicCrypto); - param.emplace_back(ip_version, QuicVersionType::GquicTls); - param.emplace_back(ip_version, QuicVersionType::Iquic); + for (const auto& quic_version : quic::CurrentSupportedHttp3Versions()) { + param.emplace_back(ip_version, quic_version); + } } - return param; } std::string testParamsToString( - const ::testing::TestParamInfo>& + const ::testing::TestParamInfo>& params) { std::string ip_version = params.param.first == Network::Address::IpVersion::v4 ? "IPv4" : "IPv6"; - switch (params.param.second) { - case QuicVersionType::GquicQuicCrypto: - return absl::StrCat(ip_version, "_UseGQuicWithQuicCrypto"); - case QuicVersionType::GquicTls: - return absl::StrCat(ip_version, "_UseGQuicWithTLS"); - case QuicVersionType::Iquic: - return absl::StrCat(ip_version, "_UseHttp3"); - } - NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + return absl::StrCat(ip_version, quic::QuicVersionToString(params.param.second.transport_version)); } } // namespace Quic diff --git a/test/common/router/BUILD b/test/common/router/BUILD index ed6f767661b50..596a8a86e7a3f 100644 --- a/test/common/router/BUILD +++ b/test/common/router/BUILD @@ -153,7 +153,6 @@ envoy_cc_test( "//test/test_common:simulated_time_system_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/admin/v3:pkg_cc_proto", - "@envoy_api//envoy/api/v2:pkg_cc_proto", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index 
3a40e003bc4bc..71bcbb2e18633 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -145,17 +145,7 @@ Http::TestRequestHeaderMapImpl genHeaders(const std::string& host, const std::st envoy::config::route::v3::RouteConfiguration parseRouteConfigurationFromYaml(const std::string& yaml) { envoy::config::route::v3::RouteConfiguration route_config; - // Most tests should be v3 and not boost. - bool avoid_boosting = true; - // If we're under TestDeprecatedV2Api, allow boosting. - auto* runtime = Runtime::LoaderSingleton::getExisting(); - if (runtime != nullptr && runtime->threadsafeSnapshot()->runtimeFeatureEnabled( - "envoy.test_only.broken_in_production.enable_deprecated_v2_api")) { - avoid_boosting = false; - } - // Load the file and keep the annotations (in case of an upgrade) to make sure - // validate() observes the upgrade. - TestUtility::loadFromYaml(yaml, route_config, true, avoid_boosting); + TestUtility::loadFromYaml(yaml, route_config); TestUtility::validate(route_config); return route_config; } @@ -2131,6 +2121,87 @@ TEST_F(RouteMatcherTest, QueryParamMatchedRouting) { } } +TEST_F(RouteMatcherTest, DynamicMetadataMatchedRouting) { + const std::string yaml = R"EOF( +virtual_hosts: + - name: test + domains: ["www.example.com"] + routes: + - match: + prefix: "/" + dynamic_metadata: + - filter: example + path: + - key: k1 + value: + string_match: + exact: foo + route: + cluster: foo + - match: + prefix: "/" + dynamic_metadata: + - filter: example + path: + - key: k2 + value: + string_match: + exact: bar + - filter: example + path: + - key: k3 + value: + string_match: + exact: bar + route: + cluster: bar + - match: + prefix: "/" + route: + cluster: default +)EOF"; + + factory_context_.cluster_manager_.initializeClusters({"foo", "bar", "default"}, {}); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); + Http::TestRequestHeaderMapImpl headers = genHeaders("www.example.com", "/", 
"GET"); + NiceMock stream_info; + + { + envoy::config::core::v3::Metadata metadata; + (*metadata.mutable_filter_metadata())["example"] = MessageUtil::keyValueStruct("k1", "foo"); + EXPECT_CALL(Const(stream_info), dynamicMetadata()).WillRepeatedly(ReturnRef(metadata)); + EXPECT_EQ("foo", config.route(headers, stream_info, 0)->routeEntry()->clusterName()); + } + + { + envoy::config::core::v3::Metadata metadata; + (*metadata.mutable_filter_metadata())["example"] = + MessageUtil::keyValueStruct({{"k2", "bar"}, {"k3", "bar"}}); + EXPECT_CALL(Const(stream_info), dynamicMetadata()).WillRepeatedly(ReturnRef(metadata)); + EXPECT_EQ("bar", config.route(headers, stream_info, 0)->routeEntry()->clusterName()); + } + + { + envoy::config::core::v3::Metadata metadata; + (*metadata.mutable_filter_metadata())["example"] = MessageUtil::keyValueStruct("k2", "bar"); + EXPECT_CALL(Const(stream_info), dynamicMetadata()).WillRepeatedly(ReturnRef(metadata)); + EXPECT_EQ("default", config.route(headers, stream_info, 0)->routeEntry()->clusterName()); + } + + { + envoy::config::core::v3::Metadata metadata; + (*metadata.mutable_filter_metadata())["example"] = MessageUtil::keyValueStruct("k3", "bar"); + EXPECT_CALL(Const(stream_info), dynamicMetadata()).WillRepeatedly(ReturnRef(metadata)); + EXPECT_EQ("default", config.route(headers, stream_info, 0)->routeEntry()->clusterName()); + } + + { + envoy::config::core::v3::Metadata metadata; + EXPECT_CALL(Const(stream_info), dynamicMetadata()).WillRepeatedly(ReturnRef(metadata)); + EXPECT_EQ("default", config.route(headers, stream_info, 0)->routeEntry()->clusterName()); + } +} + class RouterMatcherHashPolicyTest : public testing::Test, public ConfigImplTestBase { protected: RouterMatcherHashPolicyTest() @@ -3023,8 +3094,8 @@ TEST_F(RouteMatcherTest, ShadowClusterNotFound) { - match: prefix: "/foo" route: - request_mirror_policy: - cluster: some_cluster + request_mirror_policies: + - cluster: some_cluster cluster: www2 )EOF"; @@ -3169,9 +3240,7 @@ 
TEST_F(RouteMatcherTest, ClusterNotFoundResponseCodeConfig404) { config.route(headers, 0)->routeEntry()->clusterNotFoundResponseCode()); } -// TODO(dereka) DEPRECATED_FEATURE_TEST can be removed when `request_mirror_policy` is removed. -TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(Shadow)) { - TestDeprecatedV2Api _deprecated_v2_api; +TEST_F(RouteMatcherTest, RequestMirrorPolicies) { const std::string yaml = R"EOF( virtual_hosts: - name: www2 @@ -3181,19 +3250,19 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(Shadow)) { - match: prefix: "/foo" route: - request_mirror_policy: - cluster: some_cluster + request_mirror_policies: + - cluster: some_cluster cluster: www2 - match: prefix: "/bar" route: - request_mirror_policy: - cluster: some_cluster2 - runtime_fraction: - default_value: - numerator: 20 - denominator: HUNDRED - runtime_key: foo + request_mirror_policies: + - cluster: some_cluster2 + runtime_fraction: + default_value: + numerator: 20 + denominator: HUNDRED + runtime_key: foo cluster: www2 - match: prefix: "/baz" @@ -3243,75 +3312,8 @@ TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(Shadow)) { EXPECT_EQ("foo", boz_shadow_policies[1]->runtimeKey()); } -TEST_F(RouteMatcherTest, DEPRECATED_FEATURE_TEST(ShadowPolicyAndPolicies)) { - TestDeprecatedV2Api _deprecated_v2_api; - const std::string yaml = R"EOF( -virtual_hosts: -- name: www2 - domains: - - www.lyft.com - routes: - - match: - prefix: "/foo" - route: - request_mirror_policy: - cluster: some_cluster - request_mirror_policies: - - cluster: some_other_cluster - cluster: www2 - )EOF"; - - EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, - "Cannot specify both request_mirror_policy and request_mirror_policies"); -} - class RouteConfigurationV2 : public testing::Test, public ConfigImplTestBase {}; -// When removing runtime_key: this test can be removed. 
-TEST_F(RouteConfigurationV2, DEPRECATED_FEATURE_TEST(RequestMirrorPolicy)) { - TestDeprecatedV2Api _deprecated_v2_api; - const std::string yaml = R"EOF( -virtual_hosts: - - name: mirror - domains: [mirror.lyft.com] - routes: - - match: { prefix: "/"} - route: - cluster: foo - request_mirror_policy: - cluster: foo_mirror - runtime_key: will_be_ignored - runtime_fraction: - default_value: - numerator: 20 - denominator: HUNDRED - runtime_key: mirror_key - - )EOF"; - - factory_context_.cluster_manager_.initializeClusters({"foo", "foo_mirror"}, {}); - TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); - - EXPECT_EQ("foo_mirror", config.route(genHeaders("mirror.lyft.com", "/foo", "GET"), 0) - ->routeEntry() - ->shadowPolicies()[0] - ->cluster()); - - // `runtime_fraction` takes precedence over the deprecated `runtime_key` field. - EXPECT_EQ("mirror_key", config.route(genHeaders("mirror.lyft.com", "/foo", "GET"), 0) - ->routeEntry() - ->shadowPolicies()[0] - ->runtimeKey()); - - const auto& default_value = config.route(genHeaders("mirror.lyft.com", "/foo", "GET"), 0) - ->routeEntry() - ->shadowPolicies()[0] - ->defaultValue(); - EXPECT_EQ(20, default_value.numerator()); - EXPECT_EQ(envoy::type::v3::FractionalPercent::HUNDRED, default_value.denominator()); -} - TEST_F(RouteMatcherTest, Retry) { const std::string yaml = R"EOF( virtual_hosts: @@ -4495,7 +4497,7 @@ max_direct_response_body_size_bytes: 1024 genRedirectHeaders("redirect.lyft.com", "/https", false, false); EXPECT_EQ("https://redirect.lyft.com/https", config.route(headers, 0)->directResponseEntry()->newPath(headers)); - EXPECT_EQ(nullptr, config.route(headers, 0)->perFilterConfig("bar")); + EXPECT_EQ(nullptr, config.route(headers, 0)->mostSpecificPerFilterConfig("bar")); } { Http::TestRequestHeaderMapImpl headers = @@ -4899,10 +4901,10 @@ TEST_F(RouteMatcherTest, WeightedClusters) { EXPECT_EQ(Http::Code::ServiceUnavailable, route_entry->clusterNotFoundResponseCode()); 
EXPECT_EQ(nullptr, route_entry->corsPolicy()); EXPECT_EQ("test_value", - Envoy::Config::Metadata::metadataValue(&route_entry->metadata(), "com.bar.foo", "baz") + Envoy::Config::Metadata::metadataValue(&route->metadata(), "com.bar.foo", "baz") .string_value()); - EXPECT_EQ(nullptr, route_entry->typedMetadata().get(baz_factory.name())); - EXPECT_EQ("meh", route_entry->typedMetadata().get(baz_factory.name())->name); + EXPECT_EQ(nullptr, route->typedMetadata().get(baz_factory.name())); + EXPECT_EQ("meh", route->typedMetadata().get(baz_factory.name())->name); EXPECT_EQ("hello", route->decorator()->getOperation()); Http::TestResponseHeaderMapImpl response_headers; @@ -5275,6 +5277,71 @@ TEST_F(RouteMatcherTest, TestWeightedClusterHeaderManipulation) { } } +TEST_F(RouteMatcherTest, WeightedClusterInvalidConfigWithBothNameAndClusterHeader) { + const std::string yaml = R"EOF( + virtual_hosts: + - name: www1 + domains: ["www1.lyft.com"] + routes: + - match: { prefix: "/" } + route: + weighted_clusters: + total_weight: 100 + clusters: + - cluster_header: some_header + name: some_name + weight: 30 + - name: cluster1 + weight: 30 + - name: cluster2 + weight: 40 + )EOF"; + + EXPECT_THROW_WITH_MESSAGE( + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "Only one of name or cluster_header can be specified"); +} + +TEST_F(RouteMatcherTest, WeightedClusterInvalidConfigWithNoClusterSpecifier) { + const std::string yaml = R"EOF( + virtual_hosts: + - name: www1 + domains: ["www1.lyft.com"] + routes: + - match: { prefix: "/" } + route: + weighted_clusters: + total_weight: 30 + clusters: + - weight: + 30 + )EOF"; + + EXPECT_THROW_WITH_MESSAGE( + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "At least one of name or cluster_header need to be specified"); +} + +TEST_F(RouteMatcherTest, WeightedClusterInvalidConfigWithInvalidHttpHeader) { + const std::string yaml = R"EOF( + virtual_hosts: + - 
name: www1 + domains: ["www1.lyft.com"] + routes: + - match: { prefix: "/" } + route: + weighted_clusters: + total_weight: 30 + clusters: + - cluster_header: "test\r" + weight: 30 + )EOF"; + + EXPECT_THROW_WITH_REGEX( + TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, + "Proto constraint validation failed.*"); +} + TEST(NullConfigImplTest, All) { NullConfigImpl config; NiceMock stream_info; @@ -5390,8 +5457,7 @@ TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigMissingPathSpecifier) "RouteValidationError.Match"); } -TEST_F(BadHttpRouteConfigurationsTest, DEPRECATED_FEATURE_TEST(BadRouteEntryConfigPrefixAndRegex)) { - TestDeprecatedV2Api _deprecated_v2_api; +TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigPrefixAndRegex) { const std::string yaml = R"EOF( virtual_hosts: - name: www2 @@ -5400,7 +5466,9 @@ TEST_F(BadHttpRouteConfigurationsTest, DEPRECATED_FEATURE_TEST(BadRouteEntryConf routes: - match: prefix: "/" - regex: "/[bc]at" + safe_regex: + google_re2: {} + regex: "/[bc]at" route: cluster: www2 )EOF"; @@ -5408,7 +5476,7 @@ TEST_F(BadHttpRouteConfigurationsTest, DEPRECATED_FEATURE_TEST(BadRouteEntryConf #ifndef GTEST_USES_SIMPLE_RE EXPECT_THROW_WITH_REGEX( TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, - "invalid value oneof field 'path_specifier' is already set. Cannot set '(prefix|regex)' " + "invalid value oneof field 'path_specifier' is already set. Cannot set '(prefix|safe_regex)' " "for " "type oneof"); #else @@ -5418,7 +5486,7 @@ TEST_F(BadHttpRouteConfigurationsTest, DEPRECATED_FEATURE_TEST(BadRouteEntryConf "already set. Cannot set 'prefix' for " "type oneof"), ::testing::ContainsRegex("invalid value oneof field 'path_specifier' is " - "already set. Cannot set 'regex' for " + "already set. 
Cannot set 'safe_regex' for " "type oneof"))); #endif } @@ -5439,8 +5507,7 @@ TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigNoAction) { "caused by field: \"action\", reason: is required"); } -TEST_F(BadHttpRouteConfigurationsTest, DEPRECATED_FEATURE_TEST(BadRouteEntryConfigPathAndRegex)) { - TestDeprecatedV2Api _deprecated_v2_api; +TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigPathAndRegex) { const std::string yaml = R"EOF( virtual_hosts: - name: www2 @@ -5449,7 +5516,9 @@ TEST_F(BadHttpRouteConfigurationsTest, DEPRECATED_FEATURE_TEST(BadRouteEntryConf routes: - match: path: "/foo" - regex: "/[bc]at" + safe_regex: + google_re2: {} + regex: "/[bc]at" route: cluster: www2 )EOF"; @@ -5457,7 +5526,7 @@ TEST_F(BadHttpRouteConfigurationsTest, DEPRECATED_FEATURE_TEST(BadRouteEntryConf #ifndef GTEST_USES_SIMPLE_RE EXPECT_THROW_WITH_REGEX( TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, - "invalid value oneof field 'path_specifier' is already set. Cannot set '(path|regex)' " + "invalid value oneof field 'path_specifier' is already set. Cannot set '(path|safe_regex)' " "for " "type oneof"); #else @@ -5467,7 +5536,7 @@ TEST_F(BadHttpRouteConfigurationsTest, DEPRECATED_FEATURE_TEST(BadRouteEntryConf "already set. Cannot set 'path' for " "type oneof"), ::testing::ContainsRegex("invalid value oneof field 'path_specifier' is " - "already set. Cannot set 'regex' for " + "already set. 
Cannot set 'safe_regex' for " "type oneof"))); #endif } @@ -5482,7 +5551,9 @@ TEST_F(BadHttpRouteConfigurationsTest, BadRouteEntryConfigPrefixAndPathAndRegex) - match: prefix: "/" path: "/foo" - regex: "/[bc]at" + safe_regex: + google_re2: {} + regex: "/[bc]at" route: cluster: www2 )EOF"; @@ -5730,27 +5801,6 @@ TEST_F(RoutePropertyTest, TestRouteCorsConfig) { EXPECT_EQ(cors_policy->allowCredentials(), true); } -TEST_F(RoutePropertyTest, DEPRECATED_FEATURE_TEST(TestBadCorsConfig)) { - TestDeprecatedV2Api _deprecated_v2_api; - const std::string yaml = R"EOF( -virtual_hosts: -- name: default - domains: - - "*" - routes: - - match: - prefix: "/api" - route: - cluster: ats - cors: - enabled: 0 -)EOF"; - - EXPECT_THROW_WITH_REGEX( - TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), EnvoyException, - "Unable to parse JSON as proto .*: invalid value 0 for type TYPE_BOOL"); -} - TEST_F(RouteMatcherTest, Decorator) { const std::string yaml = R"EOF( virtual_hosts: @@ -6320,8 +6370,8 @@ name: foo checkPathMatchCriterion(route.get(), "/", PathMatchType::Prefix); const auto route_entry = route->routeEntry(); - const auto& metadata = route_entry->metadata(); - const auto& typed_metadata = route_entry->typedMetadata(); + const auto& metadata = route->metadata(); + const auto& typed_metadata = route->typedMetadata(); EXPECT_EQ("test_value", Envoy::Config::Metadata::metadataValue(&metadata, "com.bar.foo", "baz").string_value()); @@ -6898,6 +6948,131 @@ TEST_F(RouteMatcherTest, HeaderMatchedRoutingV2) { } } +TEST_F(RouteMatcherTest, EnsureMatchingAllConditions) { + const std::string yaml = R"EOF( +virtual_hosts: + - name: local_service + domains: ["*"] + routes: + - match: + prefix: "/" + tls_context: + presented: true + runtime_fraction: + default_value: + numerator: 50 + denominator: MILLION + runtime_key: "bogus_key" + headers: + - name: :path + string_match: + prefix: "/foo" + query_parameters: + - name: param + string_match: + exact: test + grpc: {} 
+ route: + cluster: bar_cluster + - match: + prefix: "/" + route: + cluster: foo_cluster + )EOF"; + + factory_context_.cluster_manager_.initializeClusters({"foo_cluster", "bar_cluster"}, {}); + + NiceMock stream_info; + auto connection_info = std::make_shared(); + EXPECT_CALL(*connection_info, peerCertificatePresented()).WillRepeatedly(Return(true)); + EXPECT_CALL(*connection_info, peerCertificateValidated()).WillRepeatedly(Return(true)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); + + // all the conditions are matched. + { + Runtime::MockSnapshot snapshot; + ON_CALL(factory_context_.runtime_loader_, snapshot()).WillByDefault(ReturnRef(snapshot)); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, false); + + EXPECT_CALL(snapshot, + featureEnabled("bogus_key", + testing::Matcher(_), 41)) + .WillRepeatedly(Return(true)); + auto headers = genHeaders("www.lyft.com", "/foo?param=test", "GET"); + headers.addCopy("content-type", "application/grpc"); + EXPECT_EQ("bar_cluster", config.route(headers, stream_info, 41)->routeEntry()->clusterName()); + } + // not a grpc + { + Runtime::MockSnapshot snapshot; + ON_CALL(factory_context_.runtime_loader_, snapshot()).WillByDefault(ReturnRef(snapshot)); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, false); + + EXPECT_CALL(snapshot, + featureEnabled("bogus_key", + testing::Matcher(_), 41)) + .WillRepeatedly(Return(true)); + auto headers = genHeaders("www.lyft.com", "/foo?param=test", "GET"); + EXPECT_EQ("foo_cluster", config.route(headers, stream_info, 41)->routeEntry()->clusterName()); + } + // runtime_fraction isn't matched. 
+ { + Runtime::MockSnapshot snapshot; + ON_CALL(factory_context_.runtime_loader_, snapshot()).WillByDefault(ReturnRef(snapshot)); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, false); + + EXPECT_CALL(snapshot, + featureEnabled("bogus_key", + testing::Matcher(_), 43)) + .WillRepeatedly(Return(false)); + auto headers = genHeaders("www.lyft.com", "/foo?param=test", "GET"); + headers.addCopy("content-type", "application/grpc"); + EXPECT_EQ("foo_cluster", config.route(headers, stream_info, 43)->routeEntry()->clusterName()); + } + // header isn't matched. + { + Runtime::MockSnapshot snapshot; + ON_CALL(factory_context_.runtime_loader_, snapshot()).WillByDefault(ReturnRef(snapshot)); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, false); + + EXPECT_CALL(snapshot, + featureEnabled("bogus_key", + testing::Matcher(_), 41)) + .WillRepeatedly(Return(true)); + auto headers = genHeaders("www.lyft.com", "/?param=test", "GET"); + headers.addCopy("content-type", "application/grpc"); + EXPECT_EQ("foo_cluster", config.route(headers, stream_info, 41)->routeEntry()->clusterName()); + } + // no tls. + { + Runtime::MockSnapshot snapshot; + ON_CALL(factory_context_.runtime_loader_, snapshot()).WillByDefault(ReturnRef(snapshot)); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, false); + + EXPECT_CALL(snapshot, + featureEnabled("bogus_key", + testing::Matcher(_), 41)) + .WillRepeatedly(Return(true)); + auto headers = genHeaders("www.lyft.com", "/foo?param=test", "GET"); + headers.addCopy("content-type", "application/grpc"); + EXPECT_EQ("foo_cluster", config.route(headers, 41)->routeEntry()->clusterName()); + } + // missing query parameter. 
+ { + Runtime::MockSnapshot snapshot; + ON_CALL(factory_context_.runtime_loader_, snapshot()).WillByDefault(ReturnRef(snapshot)); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, false); + + EXPECT_CALL(snapshot, + featureEnabled("bogus_key", + testing::Matcher(_), 41)) + .WillRepeatedly(Return(true)); + auto headers = genHeaders("www.lyft.com", "/foo", "GET"); + headers.addCopy("content-type", "application/grpc"); + EXPECT_EQ("foo_cluster", config.route(headers, stream_info, 41)->routeEntry()->clusterName()); + } +} + // Test Route Matching based on connection Tls Context. // Validate configured and default settings are routed to the correct cluster. TEST_F(RouteMatcherTest, TlsContextMatching) { @@ -6952,7 +7127,7 @@ TEST_F(RouteMatcherTest, TlsContextMatching) { auto connection_info = std::make_shared(); EXPECT_CALL(*connection_info, peerCertificatePresented()).WillRepeatedly(Return(true)); EXPECT_CALL(*connection_info, peerCertificateValidated()).WillRepeatedly(Return(true)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); Http::TestRequestHeaderMapImpl headers = genHeaders("www.lyft.com", "/peer-cert-test", "GET"); EXPECT_EQ("server_peer-cert-presented", @@ -6964,7 +7139,7 @@ TEST_F(RouteMatcherTest, TlsContextMatching) { auto connection_info = std::make_shared(); EXPECT_CALL(*connection_info, peerCertificatePresented()).WillRepeatedly(Return(false)); EXPECT_CALL(*connection_info, peerCertificateValidated()).WillRepeatedly(Return(true)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); Http::TestRequestHeaderMapImpl headers = genHeaders("www.lyft.com", "/peer-cert-test", "GET"); EXPECT_EQ("server_peer-cert-not-presented", @@ -6976,7 +7151,7 @@ 
TEST_F(RouteMatcherTest, TlsContextMatching) { auto connection_info = std::make_shared(); EXPECT_CALL(*connection_info, peerCertificatePresented()).WillRepeatedly(Return(false)); EXPECT_CALL(*connection_info, peerCertificateValidated()).WillRepeatedly(Return(true)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); Http::TestRequestHeaderMapImpl headers = genHeaders("www.lyft.com", "/peer-cert-no-tls-context-match", "GET"); @@ -6989,7 +7164,7 @@ TEST_F(RouteMatcherTest, TlsContextMatching) { auto connection_info = std::make_shared(); EXPECT_CALL(*connection_info, peerCertificatePresented()).WillRepeatedly(Return(true)); EXPECT_CALL(*connection_info, peerCertificateValidated()).WillRepeatedly(Return(true)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); Http::TestRequestHeaderMapImpl headers = genHeaders("www.lyft.com", "/peer-cert-no-tls-context-match", "GET"); @@ -7002,7 +7177,7 @@ TEST_F(RouteMatcherTest, TlsContextMatching) { auto connection_info = std::make_shared(); EXPECT_CALL(*connection_info, peerCertificatePresented()).WillRepeatedly(Return(true)); EXPECT_CALL(*connection_info, peerCertificateValidated()).WillRepeatedly(Return(true)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); Http::TestRequestHeaderMapImpl headers = genHeaders("www.lyft.com", "/peer-validated-cert-test", "GET"); @@ -7015,7 +7190,7 @@ TEST_F(RouteMatcherTest, TlsContextMatching) { auto connection_info = std::make_shared(); EXPECT_CALL(*connection_info, peerCertificatePresented()).WillRepeatedly(Return(true)); EXPECT_CALL(*connection_info, 
peerCertificateValidated()).WillRepeatedly(Return(false)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); Http::TestRequestHeaderMapImpl headers = genHeaders("www.lyft.com", "/peer-validated-cert-test", "GET"); @@ -7028,7 +7203,7 @@ TEST_F(RouteMatcherTest, TlsContextMatching) { auto connection_info = std::make_shared(); EXPECT_CALL(*connection_info, peerCertificatePresented()).WillRepeatedly(Return(true)); EXPECT_CALL(*connection_info, peerCertificateValidated()).WillRepeatedly(Return(false)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); Http::TestRequestHeaderMapImpl headers = genHeaders("www.lyft.com", "/peer-cert-no-tls-context-match", "GET"); @@ -7041,7 +7216,7 @@ TEST_F(RouteMatcherTest, TlsContextMatching) { auto connection_info = std::make_shared(); EXPECT_CALL(*connection_info, peerCertificatePresented()).WillRepeatedly(Return(true)); EXPECT_CALL(*connection_info, peerCertificateValidated()).WillRepeatedly(Return(true)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); Http::TestRequestHeaderMapImpl headers = genHeaders("www.lyft.com", "/peer-cert-no-tls-context-match", "GET"); @@ -7052,7 +7227,7 @@ TEST_F(RouteMatcherTest, TlsContextMatching) { { NiceMock stream_info; std::shared_ptr connection_info; - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); Http::TestRequestHeaderMapImpl headers = genHeaders("www.lyft.com", "/peer-cert-no-tls-context-match", "GET"); @@ -7531,20 +7706,22 @@ class PerFilterConfigsTest : public 
testing::Test, public ConfigImplTestBase { } }; - void checkEach(const std::string& yaml, uint32_t expected_entry, uint32_t expected_route, - uint32_t expected_vhost) { + void checkEach(const std::string& yaml, uint32_t expected_most_specific_config, + absl::InlinedVector& expected_traveled_config) { const TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); const auto route = config.route(genHeaders("www.foo.com", "/", "GET"), 0); - const auto* route_entry = route->routeEntry(); - const auto& vhost = route_entry->virtualHost(); + absl::InlinedVector traveled_cfg; - check(route_entry->perFilterConfigTyped(factory_.name()), expected_entry, - "route entry"); - check(route->perFilterConfigTyped(factory_.name()), expected_route, - "route"); - check(vhost.perFilterConfigTyped(factory_.name()), expected_vhost, - "virtual host"); + check(dynamic_cast( + route->mostSpecificPerFilterConfig(factory_.name())), + expected_most_specific_config, "most specific config"); + route->traversePerFilterConfig( + factory_.name(), [&](const Router::RouteSpecificFilterConfig& cfg) { + auto* typed_cfg = dynamic_cast(&cfg); + traveled_cfg.push_back(typed_cfg->config_.seconds()); + }); + ASSERT_EQ(expected_traveled_config, traveled_cfg); } void check(const DerivedFilterConfig* cfg, uint32_t expected_seconds, std::string source) { @@ -7560,13 +7737,15 @@ class PerFilterConfigsTest : public testing::Test, public ConfigImplTestBase { optional_http_filters); const auto route = config.route(genHeaders("www.foo.com", "/", "GET"), 0); - const auto* route_entry = route->routeEntry(); - const auto& vhost = route_entry->virtualHost(); + absl::InlinedVector traveled_cfg; - EXPECT_EQ(nullptr, - route_entry->perFilterConfigTyped(default_factory_.name())); - EXPECT_EQ(nullptr, route->perFilterConfigTyped(default_factory_.name())); - EXPECT_EQ(nullptr, vhost.perFilterConfigTyped(default_factory_.name())); + EXPECT_EQ(nullptr, 
route->mostSpecificPerFilterConfig(factory_.name())); + route->traversePerFilterConfig( + factory_.name(), [&](const Router::RouteSpecificFilterConfig& cfg) { + auto* typed_cfg = dynamic_cast(&cfg); + traveled_cfg.push_back(typed_cfg->config_.seconds()); + }); + EXPECT_EQ(0, traveled_cfg.size()); } TestFilterConfig factory_; @@ -7576,64 +7755,6 @@ class PerFilterConfigsTest : public testing::Test, public ConfigImplTestBase { registered_default_factory_; }; -TEST_F(PerFilterConfigsTest, DEPRECATED_FEATURE_TEST(TypedConfigFilterError)) { - TestDeprecatedV2Api _deprecated_v2_api; - { - const std::string yaml = R"EOF( -virtual_hosts: - - name: bar - domains: ["*"] - routes: - - match: { prefix: "/" } - route: { cluster: baz } - per_filter_config: { unknown.filter: {} } - typed_per_filter_config: - unknown.filter: - "@type": type.googleapis.com/google.protobuf.Timestamp -)EOF"; - - EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), - EnvoyException, "Only one of typed_configs or configs can be specified"); - } - - { - const std::string yaml = R"EOF( -virtual_hosts: - - name: bar - domains: ["*"] - routes: - - match: { prefix: "/" } - route: { cluster: baz } - per_filter_config: { unknown.filter: {} } - typed_per_filter_config: - unknown.filter: - "@type": type.googleapis.com/google.protobuf.Timestamp -)EOF"; - - EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, true), - EnvoyException, "Only one of typed_configs or configs can be specified"); - } -} - -TEST_F(PerFilterConfigsTest, DEPRECATED_FEATURE_TEST(UnknownFilterStruct)) { - TestDeprecatedV2Api _deprecated_v2_api; - const std::string yaml = R"EOF( -virtual_hosts: - - name: bar - domains: ["*"] - routes: - - match: { prefix: "/" } - route: { cluster: baz } - per_filter_config: { unknown.filter: {} } -)EOF"; - - EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl(parseRouteConfigurationFromYaml(yaml), factory_context_, 
true), EnvoyException, - "Didn't find a registered implementation for name: 'unknown.filter'"); -} - TEST_F(PerFilterConfigsTest, UnknownFilterAny) { const std::string yaml = R"EOF( virtual_hosts: @@ -7652,47 +7773,6 @@ TEST_F(PerFilterConfigsTest, UnknownFilterAny) { "Didn't find a registered implementation for name: 'unknown.filter'"); } -// Test that a trivially specified NamedHttpFilterConfigFactory ignores per_filter_config without -// error. -TEST_F(PerFilterConfigsTest, - DEPRECATED_FEATURE_TEST(DefaultFilterImplementationStructPerVirtualHost)) { - TestDeprecatedV2Api _deprecated_v2_api; - const std::string yaml = R"EOF( -virtual_hosts: - - name: bar - domains: ["*"] - routes: - - match: { prefix: "/" } - route: { cluster: baz } - per_filter_config: { test.default.filter: { seconds: 123} } -)EOF"; - - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.check_unsupported_typed_per_filter_config", "false"}}); - - factory_context_.cluster_manager_.initializeClusters({"baz"}, {}); - checkNoPerFilterConfig(yaml); -} - -TEST_F(PerFilterConfigsTest, DEPRECATED_FEATURE_TEST(DefaultFilterImplementationStructPerRoute)) { - TestDeprecatedV2Api _deprecated_v2_api; - const std::string yaml = R"EOF( -virtual_hosts: - - name: bar - domains: ["*"] - routes: - - match: { prefix: "/" } - route: { cluster: baz } - per_filter_config: { test.default.filter: { seconds: 123} } -)EOF"; - - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.check_unsupported_typed_per_filter_config", "false"}}); - - factory_context_.cluster_manager_.initializeClusters({"baz"}, {}); - checkNoPerFilterConfig(yaml); -} - TEST_F(PerFilterConfigsTest, DefaultFilterImplementationAnyPerVirtualHost) { TestScopedRuntime scoped_runtime; Runtime::LoaderSingleton::getExisting()->mergeValues( @@ -7737,46 +7817,6 @@ TEST_F(PerFilterConfigsTest, DefaultFilterImplementationAnyPerRoute) { checkNoPerFilterConfig(yaml); } -// Test that a trivially 
specified NamedHttpFilterConfigFactory reject unsupported -// per_filter_config. -TEST_F(PerFilterConfigsTest, - DEPRECATED_FEATURE_TEST(DefaultFilterImplementationStructWithCheckPerVirtualHost)) { - TestDeprecatedV2Api _deprecated_v2_api; - const std::string yaml = R"EOF( -virtual_hosts: - - name: bar - domains: ["*"] - routes: - - match: { prefix: "/" } - route: { cluster: baz } - per_filter_config: { test.default.filter: { seconds: 123} } -)EOF"; - - EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true), - EnvoyException, - "The filter test.default.filter doesn't support virtual host-specific configurations"); -} - -TEST_F(PerFilterConfigsTest, - DEPRECATED_FEATURE_TEST(DefaultFilterImplementationStructWithCheckPerRoute)) { - TestDeprecatedV2Api _deprecated_v2_api; - const std::string yaml = R"EOF( -virtual_hosts: - - name: bar - domains: ["*"] - routes: - - match: { prefix: "/" } - route: { cluster: baz } - per_filter_config: { test.default.filter: { seconds: 123} } -)EOF"; - - EXPECT_THROW_WITH_MESSAGE( - TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true), - EnvoyException, - "The filter test.default.filter doesn't support virtual host-specific configurations"); -} - TEST_F(PerFilterConfigsTest, DefaultFilterImplementationAnyWithCheckPerVirtualHost) { const std::string yaml = R"EOF( virtual_hosts: @@ -7941,23 +7981,6 @@ TEST_F(PerFilterConfigsTest, PerRouteWithOptionalUnknownFilter) { checkNoPerFilterConfig(yaml, optional_http_filters); } -TEST_F(PerFilterConfigsTest, DEPRECATED_FEATURE_TEST(RouteLocalConfig)) { - TestDeprecatedV2Api _deprecated_v2_api; - const std::string yaml = R"EOF( -virtual_hosts: - - name: bar - domains: ["*"] - routes: - - match: { prefix: "/" } - route: { cluster: baz } - per_filter_config: { test.filter: { seconds: 123 } } - per_filter_config: { test.filter: { seconds: 456 } } -)EOF"; - - 
factory_context_.cluster_manager_.initializeClusters({"baz"}, {}); - checkEach(yaml, 123, 123, 456); -} - TEST_F(PerFilterConfigsTest, RouteLocalTypedConfig) { const std::string yaml = R"EOF( virtual_hosts: @@ -7979,28 +8002,34 @@ TEST_F(PerFilterConfigsTest, RouteLocalTypedConfig) { )EOF"; factory_context_.cluster_manager_.initializeClusters({"baz"}, {}); - checkEach(yaml, 123, 123, 456); + absl::InlinedVector expected_traveled_config({456, 123}); + checkEach(yaml, 123, expected_traveled_config); } -TEST_F(PerFilterConfigsTest, DEPRECATED_FEATURE_TEST(WeightedClusterConfig)) { - TestDeprecatedV2Api _deprecated_v2_api; +TEST_F(PerFilterConfigsTest, RouteLocalTypedConfigWithDirectResponse) { const std::string yaml = R"EOF( virtual_hosts: - name: bar domains: ["*"] routes: - match: { prefix: "/" } - route: - weighted_clusters: - clusters: - - name: baz - weight: 100 - per_filter_config: { test.filter: { seconds: 789 } } - per_filter_config: { test.filter: { seconds: 1011 } } + direct_response: + status: 200 + typed_per_filter_config: + test.filter: + "@type": type.googleapis.com/google.protobuf.Timestamp + value: + seconds: 123 + typed_per_filter_config: + test.filter: + "@type": type.googleapis.com/google.protobuf.Struct + value: + seconds: 456 )EOF"; factory_context_.cluster_manager_.initializeClusters({"baz"}, {}); - checkEach(yaml, 789, 789, 1011); + absl::InlinedVector expected_traveled_config({456, 123}); + checkEach(yaml, 123, expected_traveled_config); } TEST_F(PerFilterConfigsTest, WeightedClusterTypedConfig) { @@ -8028,28 +8057,8 @@ TEST_F(PerFilterConfigsTest, WeightedClusterTypedConfig) { )EOF"; factory_context_.cluster_manager_.initializeClusters({"baz"}, {}); - checkEach(yaml, 789, 789, 1011); -} - -TEST_F(PerFilterConfigsTest, DEPRECATED_FEATURE_TEST(WeightedClusterFallthroughConfig)) { - TestDeprecatedV2Api _deprecated_v2_api; - const std::string yaml = R"EOF( -virtual_hosts: - - name: bar - domains: ["*"] - routes: - - match: { prefix: "/" } - route: 
- weighted_clusters: - clusters: - - name: baz - weight: 100 - per_filter_config: { test.filter: { seconds: 1213 } } - per_filter_config: { test.filter: { seconds: 1415 } } -)EOF"; - - factory_context_.cluster_manager_.initializeClusters({"baz"}, {}); - checkEach(yaml, 1213, 1213, 1415); + absl::InlinedVector expected_traveled_config({1011, 789}); + checkEach(yaml, 789, expected_traveled_config); } TEST_F(PerFilterConfigsTest, WeightedClusterFallthroughTypedConfig) { @@ -8077,7 +8086,8 @@ TEST_F(PerFilterConfigsTest, WeightedClusterFallthroughTypedConfig) { )EOF"; factory_context_.cluster_manager_.initializeClusters({"baz"}, {}); - checkEach(yaml, 1213, 1213, 1415); + absl::InlinedVector expected_traveled_config({1415, 1213}); + checkEach(yaml, 1213, expected_traveled_config); } class RouteMatchOverrideTest : public testing::Test, public ConfigImplTestBase {}; diff --git a/test/common/router/header_formatter_test.cc b/test/common/router/header_formatter_test.cc index f9ec6d274c757..02a466020d291 100644 --- a/test/common/router/header_formatter_test.cc +++ b/test/common/router/header_formatter_test.cc @@ -38,10 +38,9 @@ using ::testing::Return; using ::testing::ReturnPointee; using ::testing::ReturnRef; -static envoy::config::route::v3::Route parseRouteFromV3Yaml(const std::string& yaml, - bool avoid_boosting = true) { +static envoy::config::route::v3::Route parseRouteFromV3Yaml(const std::string& yaml) { envoy::config::route::v3::Route route; - TestUtility::loadFromYaml(yaml, route, false, avoid_boosting); + TestUtility::loadFromYaml(yaml, route); return route; } @@ -84,18 +83,18 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamLocalPortVariable) // Validate for IPv4 address auto address = Network::Address::InstanceConstSharedPtr{ new Network::Address::Ipv4Instance("127.1.2.3", 8443)}; - stream_info.downstream_address_provider_->setLocalAddress(address); + stream_info.downstream_connection_info_provider_->setLocalAddress(address); 
testFormatting(stream_info, "DOWNSTREAM_LOCAL_PORT", "8443"); // Validate for IPv6 address address = Network::Address::InstanceConstSharedPtr{new Network::Address::Ipv6Instance("::1", 9443)}; - stream_info.downstream_address_provider_->setLocalAddress(address); + stream_info.downstream_connection_info_provider_->setLocalAddress(address); testFormatting(stream_info, "DOWNSTREAM_LOCAL_PORT", "9443"); // Validate for Pipe address = Network::Address::InstanceConstSharedPtr{new Network::Address::PipeInstance("/foo")}; - stream_info.downstream_address_provider_->setLocalAddress(address); + stream_info.downstream_connection_info_provider_->setLocalAddress(address); testFormatting(stream_info, "DOWNSTREAM_LOCAL_PORT", ""); } @@ -147,7 +146,7 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerUriSanVariable auto connection_info = std::make_shared>(); const std::vector sans{"san"}; ON_CALL(*connection_info, uriSanPeerCertificate()).WillByDefault(Return(sans)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); testFormatting(stream_info, "DOWNSTREAM_PEER_URI_SAN", "san"); } @@ -156,7 +155,7 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerUriSanVariable auto connection_info = std::make_shared>(); const std::vector sans{"san1", "san2"}; ON_CALL(*connection_info, uriSanPeerCertificate()).WillByDefault(Return(sans)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); testFormatting(stream_info, "DOWNSTREAM_PEER_URI_SAN", "san1,san2"); } @@ -165,13 +164,13 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerUriSanEmpty) { auto connection_info = std::make_shared>(); ON_CALL(*connection_info, uriSanPeerCertificate()) .WillByDefault(Return(std::vector())); - 
EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); testFormatting(stream_info, "DOWNSTREAM_PEER_URI_SAN", EMPTY_STRING); } TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerNoTls) { NiceMock stream_info; - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + stream_info.downstream_connection_info_provider_->setSslConnection(nullptr); testFormatting(stream_info, "DOWNSTREAM_PEER_URI_SAN", EMPTY_STRING); } @@ -180,7 +179,7 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamLocalUriSanVariabl auto connection_info = std::make_shared>(); const std::vector sans{"san"}; ON_CALL(*connection_info, uriSanLocalCertificate()).WillByDefault(Return(sans)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); testFormatting(stream_info, "DOWNSTREAM_LOCAL_URI_SAN", "san"); } @@ -189,7 +188,7 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamLocalUriSanVariabl auto connection_info = std::make_shared>(); const std::vector sans{"san1", "san2"}; ON_CALL(*connection_info, uriSanLocalCertificate()).WillByDefault(Return(sans)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); testFormatting(stream_info, "DOWNSTREAM_LOCAL_URI_SAN", "san1,san2"); } @@ -198,13 +197,13 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamLocalUriSanVariabl auto connection_info = std::make_shared>(); ON_CALL(*connection_info, uriSanLocalCertificate()) .WillByDefault(Return(std::vector())); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + 
stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); testFormatting(stream_info, "DOWNSTREAM_LOCAL_URI_SAN", EMPTY_STRING); } TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamLocalUriSanNoTls) { NiceMock stream_info; - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + stream_info.downstream_connection_info_provider_->setSslConnection(nullptr); testFormatting(stream_info, "DOWNSTREAM_LOCAL_URI_SAN", EMPTY_STRING); } @@ -213,7 +212,7 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamLocalSubject) { auto connection_info = std::make_shared>(); std::string subject = "subject"; ON_CALL(*connection_info, subjectLocalCertificate()).WillByDefault(ReturnRef(subject)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); testFormatting(stream_info, "DOWNSTREAM_LOCAL_SUBJECT", "subject"); } @@ -222,13 +221,13 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamLocalSubjectEmpty) auto connection_info = std::make_shared>(); std::string subject; ON_CALL(*connection_info, subjectLocalCertificate()).WillByDefault(ReturnRef(subject)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); testFormatting(stream_info, "DOWNSTREAM_LOCAL_SUBJECT", EMPTY_STRING); } TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamLocalSubjectNoTls) { NiceMock stream_info; - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + stream_info.downstream_connection_info_provider_->setSslConnection(nullptr); testFormatting(stream_info, "DOWNSTREAM_LOCAL_SUBJECT", EMPTY_STRING); } @@ -237,7 +236,7 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamTlsSessionId) { auto connection_info = 
std::make_shared>(); std::string session_id = "deadbeef"; ON_CALL(*connection_info, sessionId()).WillByDefault(ReturnRef(session_id)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); testFormatting(stream_info, "DOWNSTREAM_TLS_SESSION_ID", "deadbeef"); } @@ -246,13 +245,13 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamTlsSessionIdEmpty) auto connection_info = std::make_shared>(); std::string session_id; ON_CALL(*connection_info, sessionId()).WillByDefault(ReturnRef(session_id)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); testFormatting(stream_info, "DOWNSTREAM_TLS_SESSION_ID", EMPTY_STRING); } TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamTlsSessionIdNoTls) { NiceMock stream_info; - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + stream_info.downstream_connection_info_provider_->setSslConnection(nullptr); testFormatting(stream_info, "DOWNSTREAM_TLS_SESSION_ID", EMPTY_STRING); } @@ -261,7 +260,7 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamTlsCipher) { auto connection_info = std::make_shared>(); ON_CALL(*connection_info, ciphersuiteString()) .WillByDefault(Return("TLS_DHE_RSA_WITH_AES_256_GCM_SHA384")); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); testFormatting(stream_info, "DOWNSTREAM_TLS_CIPHER", "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384"); } @@ -269,13 +268,13 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamTlsCipherEmpty) { NiceMock stream_info; auto connection_info = std::make_shared>(); ON_CALL(*connection_info, 
ciphersuiteString()).WillByDefault(Return("")); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); testFormatting(stream_info, "DOWNSTREAM_TLS_CIPHER", EMPTY_STRING); } TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamTlsCipherNoTls) { NiceMock stream_info; - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + stream_info.downstream_connection_info_provider_->setSslConnection(nullptr); testFormatting(stream_info, "DOWNSTREAM_TLS_CIPHER", EMPTY_STRING); } @@ -284,7 +283,7 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamTlsVersion) { auto connection_info = std::make_shared>(); std::string tls_version = "TLSv1.2"; ON_CALL(*connection_info, tlsVersion()).WillByDefault(ReturnRef(tls_version)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); testFormatting(stream_info, "DOWNSTREAM_TLS_VERSION", "TLSv1.2"); } @@ -292,13 +291,13 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamTlsVersionEmpty) { NiceMock stream_info; auto connection_info = std::make_shared>(); ON_CALL(*connection_info, tlsVersion()).WillByDefault(ReturnRef(EMPTY_STRING)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); testFormatting(stream_info, "DOWNSTREAM_TLS_VERSION", EMPTY_STRING); } TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamTlsVersionNoTls) { NiceMock stream_info; - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + stream_info.downstream_connection_info_provider_->setSslConnection(nullptr); testFormatting(stream_info, "DOWNSTREAM_TLS_VERSION", EMPTY_STRING); } @@ -307,7 
+306,7 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSha256Fingerpr auto connection_info = std::make_shared>(); std::string expected_sha = "685a2db593d5f86d346cb1a297009c3b467ad77f1944aa799039a2fb3d531f3f"; ON_CALL(*connection_info, sha256PeerCertificateDigest()).WillByDefault(ReturnRef(expected_sha)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); testFormatting(stream_info, "DOWNSTREAM_PEER_FINGERPRINT_256", "685a2db593d5f86d346cb1a297009c3b467ad77f1944aa799039a2fb3d531f3f"); } @@ -317,13 +316,13 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSha256Fingerpr auto connection_info = std::make_shared>(); std::string expected_sha; ON_CALL(*connection_info, sha256PeerCertificateDigest()).WillByDefault(ReturnRef(expected_sha)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); testFormatting(stream_info, "DOWNSTREAM_PEER_FINGERPRINT_256", EMPTY_STRING); } TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSha256FingerprintNoTls) { NiceMock stream_info; - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + stream_info.downstream_connection_info_provider_->setSslConnection(nullptr); testFormatting(stream_info, "DOWNSTREAM_PEER_FINGERPRINT_256", EMPTY_STRING); } @@ -332,7 +331,7 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSha1Fingerprin auto connection_info = std::make_shared>(); std::string expected_sha = "685a2db593d5f86d346cb1a297009c3b467ad77f1944aa799039a2fb3d531f3f"; ON_CALL(*connection_info, sha1PeerCertificateDigest()).WillByDefault(ReturnRef(expected_sha)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + 
stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); testFormatting(stream_info, "DOWNSTREAM_PEER_FINGERPRINT_1", "685a2db593d5f86d346cb1a297009c3b467ad77f1944aa799039a2fb3d531f3f"); } @@ -342,13 +341,13 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSha1Fingerprin auto connection_info = std::make_shared>(); std::string expected_sha; ON_CALL(*connection_info, sha1PeerCertificateDigest()).WillByDefault(ReturnRef(expected_sha)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); testFormatting(stream_info, "DOWNSTREAM_PEER_FINGERPRINT_1", EMPTY_STRING); } TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSha1FingerprintNoTls) { NiceMock stream_info; - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + stream_info.downstream_connection_info_provider_->setSslConnection(nullptr); testFormatting(stream_info, "DOWNSTREAM_PEER_FINGERPRINT_1", EMPTY_STRING); } @@ -357,7 +356,7 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSerial) { auto connection_info = std::make_shared>(); const std::string serial_number = "b8b5ecc898f2124a"; ON_CALL(*connection_info, serialNumberPeerCertificate()).WillByDefault(ReturnRef(serial_number)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); testFormatting(stream_info, "DOWNSTREAM_PEER_SERIAL", "b8b5ecc898f2124a"); } @@ -366,13 +365,13 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSerialEmpty) { auto connection_info = std::make_shared>(); const std::string serial_number; ON_CALL(*connection_info, serialNumberPeerCertificate()).WillByDefault(ReturnRef(serial_number)); - EXPECT_CALL(stream_info, 
downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); testFormatting(stream_info, "DOWNSTREAM_PEER_SERIAL", EMPTY_STRING); } TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSerialNoTls) { NiceMock stream_info; - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + stream_info.downstream_connection_info_provider_->setSslConnection(nullptr); testFormatting(stream_info, "DOWNSTREAM_PEER_SERIAL", EMPTY_STRING); } @@ -382,7 +381,7 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerIssuer) { const std::string issuer_peer = "CN=Test CA,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US"; ON_CALL(*connection_info, issuerPeerCertificate()).WillByDefault(ReturnRef(issuer_peer)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); testFormatting(stream_info, "DOWNSTREAM_PEER_ISSUER", "CN=Test CA,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US"); } @@ -392,13 +391,13 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerIssuerEmpty) { auto connection_info = std::make_shared>(); const std::string issuer_peer; ON_CALL(*connection_info, issuerPeerCertificate()).WillByDefault(ReturnRef(issuer_peer)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); testFormatting(stream_info, "DOWNSTREAM_PEER_ISSUER", EMPTY_STRING); } TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerIssuerNoTls) { NiceMock stream_info; - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + stream_info.downstream_connection_info_provider_->setSslConnection(nullptr); testFormatting(stream_info, 
"DOWNSTREAM_PEER_ISSUER", EMPTY_STRING); } @@ -408,7 +407,7 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSubject) { const std::string subject_peer = "CN=Test CA,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US"; ON_CALL(*connection_info, subjectPeerCertificate()).WillByDefault(ReturnRef(subject_peer)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); testFormatting(stream_info, "DOWNSTREAM_PEER_SUBJECT", "CN=Test CA,OU=Lyft Engineering,O=Lyft,L=San Francisco,ST=California,C=US"); } @@ -418,13 +417,13 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSubjectEmpty) auto connection_info = std::make_shared>(); const std::string subject_peer; ON_CALL(*connection_info, subjectPeerCertificate()).WillByDefault(ReturnRef(subject_peer)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); testFormatting(stream_info, "DOWNSTREAM_PEER_SUBJECT", EMPTY_STRING); } TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerSubjectNoTls) { NiceMock stream_info; - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + stream_info.downstream_connection_info_provider_->setSslConnection(nullptr); testFormatting(stream_info, "DOWNSTREAM_PEER_SUBJECT", EMPTY_STRING); } @@ -434,7 +433,7 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerCert) { std::string expected_cert = ""; ON_CALL(*connection_info, urlEncodedPemEncodedPeerCertificate()) .WillByDefault(ReturnRef(expected_cert)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); testFormatting(stream_info, "DOWNSTREAM_PEER_CERT", 
expected_cert); } @@ -444,13 +443,13 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerCertEmpty) { std::string expected_cert; ON_CALL(*connection_info, urlEncodedPemEncodedPeerCertificate()) .WillByDefault(ReturnRef(expected_cert)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); testFormatting(stream_info, "DOWNSTREAM_PEER_CERT", EMPTY_STRING); } TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerCertNoTls) { NiceMock stream_info; - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + stream_info.downstream_connection_info_provider_->setSslConnection(nullptr); testFormatting(stream_info, "DOWNSTREAM_PEER_CERT", EMPTY_STRING); } @@ -461,7 +460,7 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerCertVStart) { TestUtility::parseTime("Dec 18 01:50:34 2018 GMT", "%b %e %H:%M:%S %Y GMT"); SystemTime startTime = absl::ToChronoTime(abslStartTime); ON_CALL(*connection_info, validFromPeerCertificate()).WillByDefault(Return(startTime)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); testFormatting(stream_info, "DOWNSTREAM_PEER_CERT_V_START", "2018-12-18T01:50:34.000Z"); } @@ -472,7 +471,7 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerCertVStartCust TestUtility::parseTime("Dec 18 01:50:34 2018 GMT", "%b %e %H:%M:%S %Y GMT"); SystemTime startTime = absl::ToChronoTime(abslStartTime); ON_CALL(*connection_info, validFromPeerCertificate()).WillByDefault(Return(startTime)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); testFormatting(stream_info, 
"DOWNSTREAM_PEER_CERT_V_START(%b %e %H:%M:%S %Y %Z)", "Dec 18 01:50:34 2018 UTC"); } @@ -481,13 +480,13 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerCertVStartEmpt NiceMock stream_info; auto connection_info = std::make_shared>(); ON_CALL(*connection_info, validFromPeerCertificate()).WillByDefault(Return(absl::nullopt)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); testFormatting(stream_info, "DOWNSTREAM_PEER_CERT_V_START", EMPTY_STRING); } TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerCertVStartNoTls) { NiceMock stream_info; - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + stream_info.downstream_connection_info_provider_->setSslConnection(nullptr); testFormatting(stream_info, "DOWNSTREAM_PEER_CERT_V_START", EMPTY_STRING); } @@ -498,7 +497,7 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerCertVEnd) { TestUtility::parseTime("Dec 17 01:50:34 2020 GMT", "%b %e %H:%M:%S %Y GMT"); SystemTime startTime = absl::ToChronoTime(abslStartTime); ON_CALL(*connection_info, expirationPeerCertificate()).WillByDefault(Return(startTime)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); testFormatting(stream_info, "DOWNSTREAM_PEER_CERT_V_END", "2020-12-17T01:50:34.000Z"); } @@ -509,7 +508,7 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerCertVEndCustom TestUtility::parseTime("Dec 17 01:50:34 2020 GMT", "%b %e %H:%M:%S %Y GMT"); SystemTime startTime = absl::ToChronoTime(abslStartTime); ON_CALL(*connection_info, expirationPeerCertificate()).WillByDefault(Return(startTime)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + 
stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); testFormatting(stream_info, "DOWNSTREAM_PEER_CERT_V_END(%b %e %H:%M:%S %Y %Z)", "Dec 17 01:50:34 2020 UTC"); } @@ -518,13 +517,13 @@ TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerCertVEndEmpty) NiceMock stream_info; auto connection_info = std::make_shared>(); ON_CALL(*connection_info, expirationPeerCertificate()).WillByDefault(Return(absl::nullopt)); - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); testFormatting(stream_info, "DOWNSTREAM_PEER_CERT_V_END", EMPTY_STRING); } TEST_F(StreamInfoHeaderFormatterTest, TestFormatWithDownstreamPeerCertVEndNoTls) { NiceMock stream_info; - EXPECT_CALL(stream_info, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + stream_info.downstream_connection_info_provider_->setSslConnection(nullptr); testFormatting(stream_info, "DOWNSTREAM_PEER_CERT_V_END", EMPTY_STRING); } diff --git a/test/common/router/rds_impl_test.cc b/test/common/router/rds_impl_test.cc index 6ed9e8f1985f9..05aba26966cc4 100644 --- a/test/common/router/rds_impl_test.cc +++ b/test/common/router/rds_impl_test.cc @@ -750,9 +750,9 @@ class RouteConfigProviderManagerImplTest : public RdsTestBase { }; envoy::config::route::v3::RouteConfiguration -parseRouteConfigurationFromV3Yaml(const std::string& yaml, bool avoid_boosting = true) { +parseRouteConfigurationFromV3Yaml(const std::string& yaml) { envoy::config::route::v3::RouteConfiguration route_config; - TestUtility::loadFromYaml(yaml, route_config, true, avoid_boosting); + TestUtility::loadFromYaml(yaml, route_config); return route_config; } diff --git a/test/common/router/router_ratelimit_test.cc b/test/common/router/router_ratelimit_test.cc index 5c6b57ce3f59c..5f71cbacec72a 100644 --- a/test/common/router/router_ratelimit_test.cc +++ 
b/test/common/router/router_ratelimit_test.cc @@ -28,10 +28,9 @@ namespace Envoy { namespace Router { namespace { -envoy::config::route::v3::RateLimit parseRateLimitFromV3Yaml(const std::string& yaml_string, - bool avoid_boosting = true) { +envoy::config::route::v3::RateLimit parseRateLimitFromV3Yaml(const std::string& yaml_string) { envoy::config::route::v3::RateLimit rate_limit; - TestUtility::loadFromYaml(yaml_string, rate_limit, false, avoid_boosting); + TestUtility::loadFromYaml(yaml_string, rate_limit); TestUtility::validate(rate_limit); return rate_limit; } @@ -85,7 +84,7 @@ class RateLimitConfiguration : public testing::Test { TestUtility::loadFromYaml(yaml, route_config); config_ = std::make_unique(route_config, OptionalHttpFilters(), factory_context_, any_validation_visitor_, true); - stream_info_.downstream_address_provider_->setRemoteAddress(default_remote_address_); + stream_info_.downstream_connection_info_provider_->setRemoteAddress(default_remote_address_); } NiceMock factory_context_; @@ -142,12 +141,12 @@ TEST_F(RateLimitConfiguration, NoRateLimitPolicy) { factory_context_.cluster_manager_.initializeClusters({"www2"}, {}); setupTest(yaml); - auto* route = - config_->route(genHeaders("www.lyft.com", "/bar", "GET"), stream_info_, 0)->routeEntry(); - ON_CALL(Const(stream_info_), routeEntry()).WillByDefault(testing::Return(route)); + auto route = config_->route(genHeaders("www.lyft.com", "/bar", "GET"), stream_info_, 0); + auto* route_entry = route->routeEntry(); + ON_CALL(Const(stream_info_), route()).WillByDefault(testing::Return(route)); - EXPECT_EQ(0U, route->rateLimitPolicy().getApplicableRateLimit(0).size()); - EXPECT_TRUE(route->rateLimitPolicy().empty()); + EXPECT_EQ(0U, route_entry->rateLimitPolicy().getApplicableRateLimit(0).size()); + EXPECT_TRUE(route_entry->rateLimitPolicy().empty()); } TEST_F(RateLimitConfiguration, TestGetApplicationRateLimit) { @@ -168,13 +167,13 @@ TEST_F(RateLimitConfiguration, TestGetApplicationRateLimit) { 
factory_context_.cluster_manager_.initializeClusters({"www2"}, {}); setupTest(yaml); - auto* route = - config_->route(genHeaders("www.lyft.com", "/foo", "GET"), stream_info_, 0)->routeEntry(); - ON_CALL(Const(stream_info_), routeEntry()).WillByDefault(testing::Return(route)); + auto route = config_->route(genHeaders("www.lyft.com", "/foo", "GET"), stream_info_, 0); + auto* route_entry = route->routeEntry(); + ON_CALL(Const(stream_info_), route()).WillByDefault(testing::Return(route)); - EXPECT_FALSE(route->rateLimitPolicy().empty()); + EXPECT_FALSE(route_entry->rateLimitPolicy().empty()); std::vector> rate_limits = - route->rateLimitPolicy().getApplicableRateLimit(0); + route_entry->rateLimitPolicy().getApplicableRateLimit(0); EXPECT_EQ(1U, rate_limits.size()); std::vector descriptors; @@ -203,12 +202,12 @@ TEST_F(RateLimitConfiguration, TestVirtualHost) { factory_context_.cluster_manager_.initializeClusters({"www2test"}, {}); setupTest(yaml); - auto* route = - config_->route(genHeaders("www.lyft.com", "/bar", "GET"), stream_info_, 0)->routeEntry(); - ON_CALL(Const(stream_info_), routeEntry()).WillByDefault(testing::Return(route)); + auto route = config_->route(genHeaders("www.lyft.com", "/bar", "GET"), stream_info_, 0); + auto* route_entry = route->routeEntry(); + ON_CALL(Const(stream_info_), route()).WillByDefault(testing::Return(route)); std::vector> rate_limits = - route->virtualHost().rateLimitPolicy().getApplicableRateLimit(0); + route_entry->virtualHost().rateLimitPolicy().getApplicableRateLimit(0); EXPECT_EQ(1U, rate_limits.size()); std::vector descriptors; @@ -248,12 +247,12 @@ TEST_F(RateLimitConfiguration, Stages) { factory_context_.cluster_manager_.initializeClusters({"www2test"}, {}); setupTest(yaml); - auto* route = - config_->route(genHeaders("www.lyft.com", "/foo", "GET"), stream_info_, 0)->routeEntry(); - ON_CALL(Const(stream_info_), routeEntry()).WillByDefault(testing::Return(route)); + auto route = config_->route(genHeaders("www.lyft.com", 
"/foo", "GET"), stream_info_, 0); + auto* route_entry = route->routeEntry(); + ON_CALL(Const(stream_info_), route()).WillByDefault(testing::Return(route)); std::vector> rate_limits = - route->rateLimitPolicy().getApplicableRateLimit(0); + route_entry->rateLimitPolicy().getApplicableRateLimit(0); EXPECT_EQ(2U, rate_limits.size()); std::vector descriptors; @@ -274,7 +273,7 @@ TEST_F(RateLimitConfiguration, Stages) { descriptors.clear(); local_descriptors.clear(); - rate_limits = route->rateLimitPolicy().getApplicableRateLimit(1UL); + rate_limits = route_entry->rateLimitPolicy().getApplicableRateLimit(1UL); EXPECT_EQ(1U, rate_limits.size()); for (const RateLimitPolicyEntry& rate_limit : rate_limits) { @@ -286,7 +285,7 @@ TEST_F(RateLimitConfiguration, Stages) { testing::ContainerEq(descriptors)); EXPECT_THAT(std::vector({{{{"remote_address", "10.0.0.1"}}}}), testing::ContainerEq(local_descriptors)); - rate_limits = route->rateLimitPolicy().getApplicableRateLimit(10UL); + rate_limits = route_entry->rateLimitPolicy().getApplicableRateLimit(10UL); EXPECT_TRUE(rate_limits.empty()); } @@ -297,13 +296,13 @@ class RateLimitPolicyEntryTest : public testing::Test { parseRateLimitFromV3Yaml(yaml), ProtobufMessage::getStrictValidationVisitor()); descriptors_.clear(); local_descriptors_.clear(); - stream_info_.downstream_address_provider_->setRemoteAddress(default_remote_address_); - ON_CALL(Const(stream_info_), routeEntry()).WillByDefault(testing::Return(&route_)); + stream_info_.downstream_connection_info_provider_->setRemoteAddress(default_remote_address_); + ON_CALL(Const(stream_info_), route()).WillByDefault(testing::Return(route_)); } std::unique_ptr rate_limit_entry_; Http::TestRequestHeaderMapImpl header_; - NiceMock route_; + std::shared_ptr route_{new NiceMock()}; std::vector descriptors_; std::vector local_descriptors_; Network::Address::InstanceConstSharedPtr default_remote_address_{ @@ -350,7 +349,7 @@ TEST_F(RateLimitPolicyEntryTest, PipeAddress) { setupTest(yaml); 
- stream_info_.downstream_address_provider_->setRemoteAddress( + stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("/hello")); rate_limit_entry_->populateDescriptors(descriptors_, "", header_, stream_info_); rate_limit_entry_->populateLocalDescriptors(local_descriptors_, "", header_, stream_info_); @@ -661,7 +660,7 @@ TEST_F(RateLimitPolicyEntryTest, MetaDataMatchRouteEntrySource) { prop: foo )EOF"; - TestUtility::loadFromYaml(metadata_yaml, route_.metadata_); + TestUtility::loadFromYaml(metadata_yaml, route_->metadata_); rate_limit_entry_->populateDescriptors(descriptors_, "", header_, stream_info_); rate_limit_entry_->populateLocalDescriptors(local_descriptors_, "", header_, stream_info_); diff --git a/test/common/router/router_test.cc b/test/common/router/router_test.cc index badfe10da7024..5963d3063a277 100644 --- a/test/common/router/router_test.cc +++ b/test/common/router/router_test.cc @@ -895,6 +895,7 @@ TEST_F(RouterTest, EnvoyAttemptCountInRequestUpdatedInRetries) { putHttpResponseCode(200)); response_decoder->decodeHeaders(std::move(response_headers2), true); EXPECT_TRUE(verifyHostUpstreamStats(1, 1)); + EXPECT_EQ(2, callbacks_.stream_info_.attemptCount().value()); } // Validate that x-envoy-attempt-count is added when option is true. 
@@ -957,6 +958,7 @@ TEST_F(RouterTest, EnvoyAttemptCountInResponsePresentWithLocalReply) { callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_TRUE(verifyHostUpstreamStats(0, 1)); EXPECT_EQ(callbacks_.details(), "upstream_reset_before_response_started{connection failure}"); + EXPECT_EQ(1U, callbacks_.stream_info_.attemptCount().value()); } // Validate that the x-envoy-attempt-count header in the downstream response reflects the number of @@ -982,6 +984,7 @@ TEST_F(RouterTest, EnvoyAttemptCountInResponseWithRetries) { router_.decodeHeaders(headers, true); EXPECT_EQ(1U, callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); + EXPECT_EQ(1U, callbacks_.stream_info_.attemptCount().value()); // 5xx response. router_.retry_state_->expectHeadersRetry(); @@ -1007,6 +1010,7 @@ TEST_F(RouterTest, EnvoyAttemptCountInResponseWithRetries) { router_.retry_state_->callback_(); EXPECT_EQ(2U, callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); + EXPECT_EQ(2U, callbacks_.stream_info_.attemptCount().value()); // Normal response. 
EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No)); @@ -2246,6 +2250,7 @@ TEST_F(RouterTest, HedgedPerTryTimeoutThirdRequestSucceeds) { Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* { response_decoder1 = &decoder; EXPECT_CALL(*router_.retry_state_, onHostAttempted(_)); + upstream_stream_info_.downstream_connection_info_provider_->setConnectionID(111); callbacks.onPoolReady(encoder1, cm_.thread_local_cluster_.conn_pool_.host_, upstream_stream_info_, Http::Protocol::Http10); return nullptr; @@ -2284,6 +2289,7 @@ TEST_F(RouterTest, HedgedPerTryTimeoutThirdRequestSucceeds) { Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* { response_decoder2 = &decoder; EXPECT_CALL(*router_.retry_state_, onHostAttempted(_)); + upstream_stream_info_.downstream_connection_info_provider_->setConnectionID(222); callbacks.onPoolReady(encoder2, cm_.thread_local_cluster_.conn_pool_.host_, upstream_stream_info_, Http::Protocol::Http10); return nullptr; @@ -2308,6 +2314,7 @@ TEST_F(RouterTest, HedgedPerTryTimeoutThirdRequestSucceeds) { Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* { response_decoder3 = &decoder; EXPECT_CALL(*router_.retry_state_, onHostAttempted(_)); + upstream_stream_info_.downstream_connection_info_provider_->setConnectionID(333); callbacks.onPoolReady(encoder3, cm_.thread_local_cluster_.conn_pool_.host_, upstream_stream_info_, Http::Protocol::Http10); return nullptr; @@ -2341,6 +2348,8 @@ TEST_F(RouterTest, HedgedPerTryTimeoutThirdRequestSucceeds) { response_decoder3->decodeHeaders(std::move(response_headers2), true); EXPECT_TRUE(verifyHostUpstreamStats(1, 1)); + EXPECT_EQ(333U, callbacks_.stream_info_.upstream_connection_id_); + // TODO: Verify hedge stats here once they are implemented. 
} @@ -3737,7 +3746,7 @@ TEST_F(RouterTest, MaxStreamDurationValidlyConfiguredWithoutRetryPolicy) { upstream_stream_info_, Http::Protocol::Http10); return nullptr; })); - expectMaxStreamDurationTimerCreate(); + expectMaxStreamDurationTimerCreate(std::chrono::milliseconds(500)); Http::TestRequestHeaderMapImpl headers; HttpTestUtility::addDefaultHeaders(headers); @@ -3786,7 +3795,7 @@ TEST_F(RouterTest, MaxStreamDurationCallbackNotCalled) { upstream_stream_info_, Http::Protocol::Http10); return nullptr; })); - expectMaxStreamDurationTimerCreate(); + expectMaxStreamDurationTimerCreate(std::chrono::milliseconds(5000)); Http::TestRequestHeaderMapImpl headers; HttpTestUtility::addDefaultHeaders(headers); @@ -3809,7 +3818,7 @@ TEST_F(RouterTest, MaxStreamDurationWhenDownstreamAlreadyStartedWithoutRetryPoli upstream_stream_info_, Http::Protocol::Http10); return nullptr; })); - expectMaxStreamDurationTimerCreate(); + expectMaxStreamDurationTimerCreate(std::chrono::milliseconds(500)); Http::TestRequestHeaderMapImpl headers; HttpTestUtility::addDefaultHeaders(headers); @@ -3837,7 +3846,7 @@ TEST_F(RouterTest, MaxStreamDurationWithRetryPolicy) { upstream_stream_info_, Http::Protocol::Http10); return nullptr; })); - expectMaxStreamDurationTimerCreate(); + expectMaxStreamDurationTimerCreate(std::chrono::milliseconds(500)); Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "reset"}, {"x-envoy-internal", "true"}}; @@ -3859,7 +3868,7 @@ TEST_F(RouterTest, MaxStreamDurationWithRetryPolicy) { upstream_stream_info_, Http::Protocol::Http10); return nullptr; })); - expectMaxStreamDurationTimerCreate(); + expectMaxStreamDurationTimerCreate(std::chrono::milliseconds(500)); router_.retry_state_->callback_(); EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No)); @@ -4484,6 +4493,47 @@ TEST_F(RouterTest, HttpInternalRedirectSucceeded) { .value()); } +TEST_F(RouterTest, InternalRedirectStripsFragment) { + enableRedirects(); + 
default_request_headers_.setForwardedProto("http"); + sendRequest(); + + EXPECT_CALL(callbacks_, clearRouteCache()); + EXPECT_CALL(callbacks_, recreateStream(_)).WillOnce(Return(true)); + Http::ResponseHeaderMapPtr redirect_headers{new Http::TestResponseHeaderMapImpl{ + {":status", "302"}, {"location", "http://www.foo.com/#fragment"}}}; + response_decoder_->decodeHeaders(std::move(redirect_headers), false); + EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_ + .counter("upstream_internal_redirect_succeeded_total") + .value()); + + // In production, the HCM recreateStream would have called this. + router_.onDestroy(); + EXPECT_EQ("/", default_request_headers_.getPathValue()); +} + +TEST_F(RouterTest, InternalRedirectKeepsFragmentWithOveride) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.http_reject_path_with_fragment", "false"}}); + enableRedirects(); + default_request_headers_.setForwardedProto("http"); + sendRequest(); + + EXPECT_CALL(callbacks_, clearRouteCache()); + EXPECT_CALL(callbacks_, recreateStream(_)).WillOnce(Return(true)); + Http::ResponseHeaderMapPtr redirect_headers{new Http::TestResponseHeaderMapImpl{ + {":status", "302"}, {"location", "http://www.foo.com/#fragment"}}}; + response_decoder_->decodeHeaders(std::move(redirect_headers), false); + EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_ + .counter("upstream_internal_redirect_succeeded_total") + .value()); + + // In production, the HCM recreateStream would have called this. 
+ router_.onDestroy(); + EXPECT_EQ("/#fragment", default_request_headers_.getPathValue()); +} + TEST_F(RouterTest, HttpsInternalRedirectSucceeded) { auto ssl_connection = std::make_shared(); enableRedirects(3); @@ -4663,6 +4713,7 @@ TEST_F(RouterTest, Redirect) { router_.decodeHeaders(headers, true); EXPECT_EQ(0U, callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); + EXPECT_FALSE(callbacks_.stream_info_.attemptCount().has_value()); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); } @@ -4686,6 +4737,7 @@ TEST_F(RouterTest, RedirectFound) { router_.decodeHeaders(headers, true); EXPECT_EQ(0U, callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); + EXPECT_FALSE(callbacks_.stream_info_.attemptCount().has_value()); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); } @@ -4707,6 +4759,7 @@ TEST_F(RouterTest, DirectResponse) { router_.decodeHeaders(headers, true); EXPECT_EQ(0U, callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); + EXPECT_FALSE(callbacks_.stream_info_.attemptCount().has_value()); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); EXPECT_EQ(1UL, config_.stats_.rq_direct_response_.value()); } @@ -4731,6 +4784,7 @@ TEST_F(RouterTest, DirectResponseWithBody) { router_.decodeHeaders(headers, true); EXPECT_EQ(0U, callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); + EXPECT_FALSE(callbacks_.stream_info_.attemptCount().has_value()); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); EXPECT_EQ(1UL, config_.stats_.rq_direct_response_.value()); } @@ -4755,6 +4809,7 @@ TEST_F(RouterTest, DirectResponseWithLocation) { router_.decodeHeaders(headers, true); EXPECT_EQ(0U, callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); + EXPECT_FALSE(callbacks_.stream_info_.attemptCount().has_value()); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); EXPECT_EQ(1UL, config_.stats_.rq_direct_response_.value()); } @@ -4778,6 +4833,7 @@ TEST_F(RouterTest, 
DirectResponseWithoutLocation) { router_.decodeHeaders(headers, true); EXPECT_EQ(0U, callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); + EXPECT_FALSE(callbacks_.stream_info_.attemptCount().has_value()); EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); EXPECT_EQ(1UL, config_.stats_.rq_direct_response_.value()); } @@ -4844,7 +4900,7 @@ TEST_F(RouterTest, UpstreamSSLConnection) { std::string session_id = "D62A523A65695219D46FE1FFE285A4C371425ACE421B110B5B8D11D3EB4D5F0B"; auto connection_info = std::make_shared>(); ON_CALL(*connection_info, sessionId()).WillByDefault(ReturnRef(session_id)); - upstream_stream_info_.setDownstreamSslConnection(connection_info); + upstream_stream_info_.downstream_connection_info_provider_->setSslConnection(connection_info); expectResponseTimerCreate(); EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _)) @@ -4870,6 +4926,7 @@ TEST_F(RouterTest, UpstreamSSLConnection) { ASSERT_NE(nullptr, callbacks_.streamInfo().upstreamSslConnection()); EXPECT_EQ(session_id, callbacks_.streamInfo().upstreamSslConnection()->sessionId()); + EXPECT_FALSE(callbacks_.streamInfo().upstreamConnectionId().has_value()); } // Verify that upstream timing information is set into the StreamInfo after the upstream @@ -6062,6 +6119,48 @@ TEST_F(RouterTest, PostHttpUpstream) { router_.onDestroy(); } +TEST_F(RouterTest, SetDynamicMaxStreamDuration) { + NiceMock encoder1; + EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _)) + .WillOnce(Invoke([&](Http::ResponseDecoder&, Http::ConnectionPool::Callbacks& callbacks) + -> Http::ConnectionPool::Cancellable* { + callbacks.onPoolReady(encoder1, cm_.thread_local_cluster_.conn_pool_.host_, + upstream_stream_info_, Http::Protocol::Http10); + return nullptr; + })); + expectMaxStreamDurationTimerCreate(std::chrono::milliseconds(500)); + + Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-stream-duration-ms", "500"}}; + + HttpTestUtility::addDefaultHeaders(headers); + 
router_.decodeHeaders(headers, false); + max_stream_duration_timer_->invokeCallback(); + + router_.onDestroy(); + EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); +} + +TEST_F(RouterTest, NotSetDynamicMaxStreamDurationIfZero) { + NiceMock encoder1; + EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _)) + .WillOnce(Invoke([&](Http::ResponseDecoder&, Http::ConnectionPool::Callbacks& callbacks) + -> Http::ConnectionPool::Cancellable* { + callbacks.onPoolReady(encoder1, cm_.thread_local_cluster_.conn_pool_.host_, + upstream_stream_info_, Http::Protocol::Http10); + return nullptr; + })); + + // The timer will not be created. + EXPECT_CALL(callbacks_.dispatcher_, createTimer_).Times(0); + + Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-stream-duration-ms", "0"}}; + HttpTestUtility::addDefaultHeaders(headers); + router_.decodeHeaders(headers, false); + + router_.onDestroy(); + EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); +} + // Test that request/response header/body sizes are properly recorded. 
TEST_F(RouterTest, RequestResponseSize) { testRequestResponseSize(false); } diff --git a/test/common/router/router_test_base.cc b/test/common/router/router_test_base.cc index 11a23de807f1b..671bbee7b588e 100644 --- a/test/common/router/router_test_base.cc +++ b/test/common/router/router_test_base.cc @@ -6,6 +6,7 @@ namespace Envoy { namespace Router { using ::testing::AnyNumber; +using ::testing::Eq; using ::testing::ReturnRef; RouterTestBase::RouterTestBase(bool start_child_span, bool suppress_envoy_headers, @@ -25,10 +26,10 @@ RouterTestBase::RouterTestBase(bool start_child_span, bool suppress_envoy_header .WillByDefault(Return(host_address_)); ON_CALL(*cm_.thread_local_cluster_.conn_pool_.host_, locality()) .WillByDefault(ReturnRef(upstream_locality_)); - router_.downstream_connection_.stream_info_.downstream_address_provider_->setLocalAddress( + router_.downstream_connection_.stream_info_.downstream_connection_info_provider_->setLocalAddress( host_address_); - router_.downstream_connection_.stream_info_.downstream_address_provider_->setRemoteAddress( - Network::Utility::parseInternetAddressAndPort("1.2.3.4:80")); + router_.downstream_connection_.stream_info_.downstream_connection_info_provider_ + ->setRemoteAddress(Network::Utility::parseInternetAddressAndPort("1.2.3.4:80")); // Make the "system time" non-zero, because 0 is considered invalid by DateUtil. 
test_time_.setMonotonicTime(std::chrono::milliseconds(50)); @@ -50,9 +51,9 @@ void RouterTestBase::expectPerTryTimerCreate() { EXPECT_CALL(*per_try_timeout_, disableTimer()); } -void RouterTestBase::expectMaxStreamDurationTimerCreate() { +void RouterTestBase::expectMaxStreamDurationTimerCreate(std::chrono::milliseconds duration_msec) { max_stream_duration_timer_ = new Event::MockTimer(&callbacks_.dispatcher_); - EXPECT_CALL(*max_stream_duration_timer_, enableTimer(_, _)); + EXPECT_CALL(*max_stream_duration_timer_, enableTimer(Eq(duration_msec), _)); EXPECT_CALL(*max_stream_duration_timer_, disableTimer()); } @@ -153,6 +154,7 @@ void RouterTestBase::verifyAttemptCountInRequestBasic(bool set_include_attempt_c router_.decodeHeaders(headers, true); EXPECT_EQ(expected_count, atoi(std::string(headers.getEnvoyAttemptCountValue()).c_str())); + EXPECT_EQ(1U, callbacks_.stream_info_.attemptCount().value()); // When the router filter gets reset we should cancel the pool request. EXPECT_CALL(cancellable_, cancel(_)); diff --git a/test/common/router/router_test_base.h b/test/common/router/router_test_base.h index a212f29325700..9ac4a94bfc92f 100644 --- a/test/common/router/router_test_base.h +++ b/test/common/router/router_test_base.h @@ -1,5 +1,7 @@ #pragma once +#include + #include "source/common/http/context_impl.h" #include "source/common/router/router.h" #include "source/common/stream_info/uint32_accessor_impl.h" @@ -56,7 +58,7 @@ class RouterTestBase : public testing::Test { void expectResponseTimerCreate(); void expectPerTryTimerCreate(); - void expectMaxStreamDurationTimerCreate(); + void expectMaxStreamDurationTimerCreate(std::chrono::milliseconds duration_msec); AssertionResult verifyHostUpstreamStats(uint64_t success, uint64_t error); void verifyMetadataMatchCriteriaFromRequest(bool route_entry_has_match); void verifyAttemptCountInRequestBasic(bool set_include_attempt_count_in_request, diff --git a/test/common/router/router_upstream_log_test.cc 
b/test/common/router/router_upstream_log_test.cc index 6831f32dd3963..612790a8ee538 100644 --- a/test/common/router/router_upstream_log_test.cc +++ b/test/common/router/router_upstream_log_test.cc @@ -114,10 +114,10 @@ class RouterUpstreamLogTest : public testing::Test { .WillByDefault(Return(host_address_)); ON_CALL(*context_.cluster_manager_.thread_local_cluster_.conn_pool_.host_, locality()) .WillByDefault(ReturnRef(upstream_locality_)); - router_->downstream_connection_.stream_info_.downstream_address_provider_->setLocalAddress( - host_address_); - router_->downstream_connection_.stream_info_.downstream_address_provider_->setRemoteAddress( - Network::Utility::parseInternetAddressAndPort("1.2.3.4:80")); + router_->downstream_connection_.stream_info_.downstream_connection_info_provider_ + ->setLocalAddress(host_address_); + router_->downstream_connection_.stream_info_.downstream_connection_info_provider_ + ->setRemoteAddress(Network::Utility::parseInternetAddressAndPort("1.2.3.4:80")); } void expectResponseTimerCreate() { diff --git a/test/common/router/scoped_rds_test.cc b/test/common/router/scoped_rds_test.cc index 8be75180cec13..f1cb48f1da7bd 100644 --- a/test/common/router/scoped_rds_test.cc +++ b/test/common/router/scoped_rds_test.cc @@ -2,7 +2,6 @@ #include "envoy/admin/v3/config_dump.pb.h" #include "envoy/admin/v3/config_dump.pb.validate.h" -#include "envoy/api/v2/route.pb.h" #include "envoy/config/core/v3/config_source.pb.h" #include "envoy/config/route/v3/route.pb.h" #include "envoy/config/route/v3/scoped_route.pb.h" @@ -53,7 +52,7 @@ using ::Envoy::Http::TestRequestHeaderMapImpl; envoy::config::route::v3::ScopedRouteConfiguration parseScopedRouteConfigurationFromYaml(const std::string& yaml) { envoy::config::route::v3::ScopedRouteConfiguration scoped_route_config; - TestUtility::loadFromYaml(yaml, scoped_route_config, true); + TestUtility::loadFromYaml(yaml, scoped_route_config); return scoped_route_config; } @@ -61,7 +60,7 @@ 
envoy::extensions::filters::network::http_connection_manager::v3::HttpConnection parseHttpConnectionManagerFromYaml(const std::string& config_yaml) { envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager http_connection_manager; - TestUtility::loadFromYaml(config_yaml, http_connection_manager, true); + TestUtility::loadFromYaml(config_yaml, http_connection_manager); return http_connection_manager; } @@ -136,7 +135,7 @@ class ScopedRdsTest : public ScopedRoutesTestBase { subscriptionFromConfigSource( _, Eq(Grpc::Common::typeUrl( - API_NO_BOOST(envoy::api::v2::RouteConfiguration)().GetDescriptor()->full_name())), + envoy::config::route::v3::RouteConfiguration().GetDescriptor()->full_name())), _, _, _, _)) .Times(AnyNumber()) .WillRepeatedly( @@ -820,12 +819,12 @@ stat_prefix: foo - name: foo-scoped-routes scoped_route_configs: - name: foo - "@type": type.googleapis.com/envoy.api.v2.ScopedRouteConfiguration + "@type": type.googleapis.com/envoy.config.route.v3.ScopedRouteConfiguration route_configuration_name: foo-route-config key: fragments: { string_key: "172.10.10.10" } - name: foo2 - "@type": type.googleapis.com/envoy.api.v2.ScopedRouteConfiguration + "@type": type.googleapis.com/envoy.config.route.v3.ScopedRouteConfiguration route_configuration_name: foo-route-config2 key: fragments: { string_key: "172.10.10.20" } @@ -855,12 +854,12 @@ route_configuration_name: dynamic-foo-route-config - name: foo-scoped-routes scoped_route_configs: - name: foo - "@type": type.googleapis.com/envoy.api.v2.ScopedRouteConfiguration + "@type": type.googleapis.com/envoy.config.route.v3.ScopedRouteConfiguration route_configuration_name: foo-route-config key: fragments: { string_key: "172.10.10.10" } - name: foo2 - "@type": type.googleapis.com/envoy.api.v2.ScopedRouteConfiguration + "@type": type.googleapis.com/envoy.config.route.v3.ScopedRouteConfiguration route_configuration_name: foo-route-config2 key: fragments: { string_key: "172.10.10.20" } @@ 
-871,7 +870,7 @@ route_configuration_name: dynamic-foo-route-config - name: foo_scoped_routes scoped_route_configs: - name: dynamic-foo - "@type": type.googleapis.com/envoy.api.v2.ScopedRouteConfiguration + "@type": type.googleapis.com/envoy.config.route.v3.ScopedRouteConfiguration route_configuration_name: dynamic-foo-route-config key: fragments: { string_key: "172.30.30.10" } @@ -898,7 +897,7 @@ route_configuration_name: dynamic-foo-route-config - name: foo-scoped-routes scoped_route_configs: - name: foo - "@type": type.googleapis.com/envoy.api.v2.ScopedRouteConfiguration + "@type": type.googleapis.com/envoy.config.route.v3.ScopedRouteConfiguration route_configuration_name: foo-route-config key: fragments: { string_key: "172.10.10.10" } @@ -934,7 +933,7 @@ route_configuration_name: dynamic-foo-route-config - name: foo_scoped_routes scoped_route_configs: - name: dynamic-foo - "@type": type.googleapis.com/envoy.api.v2.ScopedRouteConfiguration + "@type": type.googleapis.com/envoy.config.route.v3.ScopedRouteConfiguration route_configuration_name: dynamic-foo-route-config key: fragments: { string_key: "172.30.30.10" } @@ -958,12 +957,12 @@ route_configuration_name: dynamic-foo-route-config - name: foo-scoped-routes scoped_route_configs: - name: foo - "@type": type.googleapis.com/envoy.api.v2.ScopedRouteConfiguration + "@type": type.googleapis.com/envoy.config.route.v3.ScopedRouteConfiguration route_configuration_name: foo-route-config key: fragments: { string_key: "172.10.10.10" } - name: foo2 - "@type": type.googleapis.com/envoy.api.v2.ScopedRouteConfiguration + "@type": type.googleapis.com/envoy.config.route.v3.ScopedRouteConfiguration route_configuration_name: foo-route-config2 key: fragments: { string_key: "172.10.10.20" } diff --git a/test/common/router/upstream_request_test.cc b/test/common/router/upstream_request_test.cc index 312d5f0c99d69..3714f5c69daac 100644 --- a/test/common/router/upstream_request_test.cc +++ b/test/common/router/upstream_request_test.cc 
@@ -58,11 +58,13 @@ TEST_F(UpstreamRequestTest, Decode200UpgradeHeaders) { // UpstreamRequest dumpState without allocating memory. TEST_F(UpstreamRequestTest, DumpsStateWithoutAllocatingMemory) { // Set up router filter - auto address_provider = - router_filter_interface_.client_connection_.stream_info_.downstream_address_provider_; - address_provider->setRemoteAddress(Network::Utility::parseInternetAddressAndPort("1.2.3.4:5678")); - address_provider->setLocalAddress(Network::Utility::parseInternetAddressAndPort("5.6.7.8:5678")); - address_provider->setDirectRemoteAddressForTest( + auto connection_info_provider = + router_filter_interface_.client_connection_.stream_info_.downstream_connection_info_provider_; + connection_info_provider->setRemoteAddress( + Network::Utility::parseInternetAddressAndPort("1.2.3.4:5678")); + connection_info_provider->setLocalAddress( + Network::Utility::parseInternetAddressAndPort("5.6.7.8:5678")); + connection_info_provider->setDirectRemoteAddressForTest( Network::Utility::parseInternetAddressAndPort("1.2.3.4:5678")); // Dump State @@ -74,7 +76,7 @@ TEST_F(UpstreamRequestTest, DumpsStateWithoutAllocatingMemory) { // Check Contents EXPECT_THAT(ostream.contents(), HasSubstr("UpstreamRequest ")); - EXPECT_THAT(ostream.contents(), HasSubstr("addressProvider: \n SocketAddressSetterImpl ")); + EXPECT_THAT(ostream.contents(), HasSubstr("addressProvider: \n ConnectionInfoSetterImpl ")); EXPECT_THAT(ostream.contents(), HasSubstr("request_headers: \n")); } diff --git a/test/common/secret/BUILD b/test/common/secret/BUILD index 3f1488d0118fa..0e62c1f5186e1 100644 --- a/test/common/secret/BUILD +++ b/test/common/secret/BUILD @@ -2,12 +2,18 @@ load( "//bazel:envoy_build_system.bzl", "envoy_cc_test", "envoy_package", + "envoy_proto_library", ) licenses(["notice"]) # Apache 2 envoy_package() +envoy_proto_library( + name = "private_key_provider_proto", + srcs = ["private_key_provider.proto"], +) + envoy_cc_test( name = "secret_manager_impl_test", srcs 
= ["secret_manager_impl_test.cc"], @@ -15,6 +21,7 @@ envoy_cc_test( "//test/extensions/transport_sockets/tls/test_data:certs", ], deps = [ + ":private_key_provider_proto_cc_proto", "//source/common/secret:sds_api_lib", "//source/common/secret:secret_manager_impl_lib", "//source/common/ssl:certificate_validation_context_config_impl_lib", @@ -51,6 +58,7 @@ envoy_cc_test( "//test/mocks/init:init_mocks", "//test/mocks/protobuf:protobuf_mocks", "//test/mocks/secret:secret_mocks", + "//test/mocks/server:transport_socket_factory_context_mocks", "//test/test_common:environment_lib", "//test/test_common:logging_lib", "//test/test_common:registry_lib", diff --git a/test/common/secret/private_key_provider.proto b/test/common/secret/private_key_provider.proto new file mode 100644 index 0000000000000..7ac4d83b77538 --- /dev/null +++ b/test/common/secret/private_key_provider.proto @@ -0,0 +1,6 @@ +syntax = "proto3"; + +package test.common.secret; + +message TestPrivateKeyMethodConfig { +} diff --git a/test/common/secret/sds_api_test.cc b/test/common/secret/sds_api_test.cc index 84e0d6159153c..a0cea30a2f8a8 100644 --- a/test/common/secret/sds_api_test.cc +++ b/test/common/secret/sds_api_test.cc @@ -20,6 +20,7 @@ #include "test/mocks/init/mocks.h" #include "test/mocks/protobuf/mocks.h" #include "test/mocks/secret/mocks.h" +#include "test/mocks/server/transport_socket_factory_context.h" #include "test/test_common/environment.h" #include "test/test_common/logging.h" #include "test/test_common/utility.h" @@ -176,7 +177,8 @@ TEST_F(SdsApiTest, DynamicTlsCertificateUpdateSuccess) { EXPECT_CALL(secret_callback, onAddOrUpdateSecret()); subscription_factory_.callbacks_->onConfigUpdate(decoded_resources.refvec_, ""); - Ssl::TlsCertificateConfigImpl tls_config(*sds_api.secret(), nullptr, *api_); + testing::NiceMock ctx; + Ssl::TlsCertificateConfigImpl tls_config(*sds_api.secret(), ctx, *api_); const std::string cert_pem = "{{ test_rundir 
}}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem"; EXPECT_EQ(TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(cert_pem)), @@ -572,7 +574,8 @@ TEST_F(SdsApiTest, DeltaUpdateSuccess) { initialize(); subscription_factory_.callbacks_->onConfigUpdate(decoded_resources.refvec_, {}, ""); - Ssl::TlsCertificateConfigImpl tls_config(*sds_api.secret(), nullptr, *api_); + testing::NiceMock ctx; + Ssl::TlsCertificateConfigImpl tls_config(*sds_api.secret(), ctx, *api_); const std::string cert_pem = "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem"; EXPECT_EQ(TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(cert_pem)), diff --git a/test/common/secret/secret_manager_impl_test.cc b/test/common/secret/secret_manager_impl_test.cc index 0c36a3c4ba404..15fc8fa59421b 100644 --- a/test/common/secret/secret_manager_impl_test.cc +++ b/test/common/secret/secret_manager_impl_test.cc @@ -14,6 +14,7 @@ #include "source/common/ssl/certificate_validation_context_config_impl.h" #include "source/common/ssl/tls_certificate_config_impl.h" +#include "test/common/secret/private_key_provider.pb.h" #include "test/mocks/event/mocks.h" #include "test/mocks/matcher/mocks.h" #include "test/mocks/server/config_tracker.h" @@ -33,6 +34,7 @@ namespace Envoy { namespace Secret { namespace { +const ::test::common::secret::TestPrivateKeyMethodConfig _mock_test_private_key_method_config_dummy; using ::Envoy::Matchers::MockStringMatcher; class SecretManagerImplTest : public testing::Test, public Logger::Loggable { @@ -73,14 +75,15 @@ name: "abc.com" filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem" )EOF"; TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), secret_config); - std::unique_ptr secret_manager(new SecretManagerImpl(config_tracker_)); + SecretManagerPtr secret_manager(new SecretManagerImpl(config_tracker_)); 
secret_manager->addStaticSecret(secret_config); ASSERT_EQ(secret_manager->findStaticTlsCertificateProvider("undefined"), nullptr); ASSERT_NE(secret_manager->findStaticTlsCertificateProvider("abc.com"), nullptr); + testing::NiceMock ctx; Ssl::TlsCertificateConfigImpl tls_config( - *secret_manager->findStaticTlsCertificateProvider("abc.com")->secret(), nullptr, *api_); + *secret_manager->findStaticTlsCertificateProvider("abc.com")->secret(), ctx, *api_); const std::string cert_pem = "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem"; EXPECT_EQ(TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(cert_pem)), @@ -106,7 +109,7 @@ TEST_F(SecretManagerImplTest, DuplicateStaticTlsCertificateSecret) { filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_key.pem" )EOF"; TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), secret_config); - std::unique_ptr secret_manager(new SecretManagerImpl(config_tracker_)); + SecretManagerPtr secret_manager(new SecretManagerImpl(config_tracker_)); secret_manager->addStaticSecret(secret_config); ASSERT_NE(secret_manager->findStaticTlsCertificateProvider("abc.com"), nullptr); @@ -150,7 +153,7 @@ TEST_F(SecretManagerImplTest, DuplicateStaticCertificateValidationContextSecret) allow_expired_certificate: true )EOF"; TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), secret_config); - std::unique_ptr secret_manager(new SecretManagerImpl(config_tracker_)); + SecretManagerPtr secret_manager(new SecretManagerImpl(config_tracker_)); secret_manager->addStaticSecret(secret_config); ASSERT_NE(secret_manager->findStaticCertificateValidationContextProvider("abc.com"), nullptr); @@ -172,7 +175,7 @@ name: "abc.com" TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), secret_config); - std::unique_ptr secret_manager(new SecretManagerImpl(config_tracker_)); + SecretManagerPtr secret_manager(new SecretManagerImpl(config_tracker_)); 
secret_manager->addStaticSecret(secret_config); @@ -201,7 +204,7 @@ name: "abc.com" TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), secret_config); - std::unique_ptr secret_manager(new SecretManagerImpl(config_tracker_)); + SecretManagerPtr secret_manager(new SecretManagerImpl(config_tracker_)); secret_manager->addStaticSecret(secret_config); @@ -212,7 +215,7 @@ name: "abc.com" // Validate that secret manager adds static generic secret successfully. TEST_F(SecretManagerImplTest, GenericSecretLoadSuccess) { - std::unique_ptr secret_manager(new SecretManagerImpl(config_tracker_)); + SecretManagerPtr secret_manager(new SecretManagerImpl(config_tracker_)); envoy::extensions::transport_sockets::tls::v3::Secret secret; const std::string yaml = @@ -237,7 +240,7 @@ name: "encryption_key" // Validate that secret manager throws an exception when adding duplicated static generic secret. TEST_F(SecretManagerImplTest, DuplicateGenericSecret) { - std::unique_ptr secret_manager(new SecretManagerImpl(config_tracker_)); + SecretManagerPtr secret_manager(new SecretManagerImpl(config_tracker_)); envoy::extensions::transport_sockets::tls::v3::Secret secret; const std::string yaml = @@ -259,7 +262,7 @@ name: "encryption_key" // Regression test of https://github.com/envoyproxy/envoy/issues/5744 TEST_F(SecretManagerImplTest, DeduplicateDynamicTlsCertificateSecretProvider) { Server::MockInstance server; - std::unique_ptr secret_manager(new SecretManagerImpl(config_tracker_)); + SecretManagerPtr secret_manager(new SecretManagerImpl(config_tracker_)); NiceMock secret_context; @@ -342,7 +345,7 @@ header_key: x-token-bin TEST_F(SecretManagerImplTest, SdsDynamicSecretUpdateSuccess) { Server::MockInstance server; - std::unique_ptr secret_manager(new SecretManagerImpl(config_tracker_)); + SecretManagerPtr secret_manager(new SecretManagerImpl(config_tracker_)); NiceMock secret_context; @@ -380,7 +383,8 @@ name: "abc.com" init_target_handle->initialize(init_watcher); 
secret_context.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate( decoded_resources.refvec_, ""); - Ssl::TlsCertificateConfigImpl tls_config(*secret_provider->secret(), nullptr, *api_); + testing::NiceMock ctx; + Ssl::TlsCertificateConfigImpl tls_config(*secret_provider->secret(), ctx, *api_); const std::string cert_pem = "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem"; EXPECT_EQ(TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(cert_pem)), @@ -393,7 +397,7 @@ name: "abc.com" TEST_F(SecretManagerImplTest, SdsDynamicGenericSecret) { Server::MockInstance server; - std::unique_ptr secret_manager(new SecretManagerImpl(config_tracker_)); + SecretManagerPtr secret_manager(new SecretManagerImpl(config_tracker_)); envoy::config::core::v3::ConfigSource config_source; NiceMock secret_context; @@ -479,7 +483,8 @@ name: "abc.com" init_target_handle->initialize(init_watcher); secret_context.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate( decoded_resources.refvec_, "keycert-v1"); - Ssl::TlsCertificateConfigImpl tls_config(*secret_provider->secret(), nullptr, *api_); + testing::NiceMock ctx; + Ssl::TlsCertificateConfigImpl tls_config(*secret_provider->secret(), ctx, *api_); EXPECT_EQ("DUMMY_INLINE_BYTES_FOR_CERT_CHAIN", tls_config.certificateChain()); EXPECT_EQ("DUMMY_INLINE_BYTES_FOR_PRIVATE_KEY", tls_config.privateKey()); EXPECT_EQ("DUMMY_PASSWORD", tls_config.password()); @@ -1061,6 +1066,68 @@ name: "signing_key" checkConfigDump("{}", mock_matcher); } +// Test that private key provider definitions inside Secrets can be added dynamically. 
+TEST_F(SecretManagerImplTest, SdsDynamicSecretPrivateKeyProviderUpdateSuccess) { + Server::MockInstance server; + SecretManagerPtr secret_manager(new SecretManagerImpl(config_tracker_)); + NiceMock secret_context; + + envoy::config::core::v3::ConfigSource config_source; + NiceMock local_info; + NiceMock random; + Stats::IsolatedStoreImpl stats; + NiceMock init_manager; + NiceMock init_watcher; + Init::TargetHandlePtr init_target_handle; + EXPECT_CALL(init_manager, add(_)) + .WillOnce(Invoke([&init_target_handle](const Init::Target& target) { + init_target_handle = target.createHandle("test"); + })); + EXPECT_CALL(secret_context, stats()).WillOnce(ReturnRef(stats)); + EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); + EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(*dispatcher_)); + EXPECT_CALL(secret_context, localInfo()).WillOnce(ReturnRef(local_info)); + EXPECT_CALL(secret_context, api()).WillRepeatedly(ReturnRef(*api_)); + + auto secret_provider = + secret_manager->findOrCreateTlsCertificateProvider(config_source, "abc.com", secret_context); + const std::string yaml = + R"EOF( +name: "abc.com" +tls_certificate: + certificate_chain: + filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem" + private_key_provider: + provider_name: test + typed_config: + "@type": type.googleapis.com/test.common.secret.TestPrivateKeyMethodConfig +)EOF"; + envoy::extensions::transport_sockets::tls::v3::Secret typed_secret; + TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), typed_secret); + EXPECT_TRUE(typed_secret.tls_certificate().has_private_key_provider()); + EXPECT_FALSE(typed_secret.tls_certificate().has_private_key()); + const auto decoded_resources = TestUtility::decodeResources({typed_secret}); + init_target_handle->initialize(init_watcher); + secret_context.cluster_manager_.subscription_factory_.callbacks_->onConfigUpdate( + decoded_resources.refvec_, ""); + 
EXPECT_TRUE(secret_provider->secret()->has_private_key_provider()); + EXPECT_FALSE(secret_provider->secret()->has_private_key()); + + // Fail because there isn't a real private key message provider, but not because the configuration + // is incorrect. + testing::NiceMock private_key_method_manager; + testing::NiceMock ssl_context_manager; + testing::NiceMock ctx; + EXPECT_CALL(private_key_method_manager, createPrivateKeyMethodProvider(_, _)) + .WillRepeatedly(Return(nullptr)); + EXPECT_CALL(ssl_context_manager, privateKeyMethodManager()) + .WillRepeatedly(ReturnRef(private_key_method_manager)); + EXPECT_CALL(ctx, sslContextManager()).WillRepeatedly(ReturnRef(ssl_context_manager)); + EXPECT_THROW_WITH_MESSAGE( + Ssl::TlsCertificateConfigImpl tls_config(*secret_provider->secret(), ctx, *api_), + EnvoyException, "Failed to load private key provider: test"); +} + } // namespace } // namespace Secret } // namespace Envoy diff --git a/test/common/stats/allocator_impl_test.cc b/test/common/stats/allocator_impl_test.cc index 83fb85d68ea8d..cc06acedaef1e 100644 --- a/test/common/stats/allocator_impl_test.cc +++ b/test/common/stats/allocator_impl_test.cc @@ -1,3 +1,4 @@ +#include #include #include "source/common/stats/allocator_impl.h" @@ -6,6 +7,7 @@ #include "test/test_common/thread_factory_for_test.h" #include "absl/synchronization/notification.h" +#include "gmock/gmock-matchers.h" #include "gtest/gtest.h" namespace Envoy { @@ -25,12 +27,18 @@ class AllocatorImplTest : public testing::Test { void clearStorage() { pool_.clear(); - EXPECT_EQ(0, symbol_table_.numSymbols()); + // If stats have been marked for deletion, they are not cleared until the + // destructor of alloc_ is called, and hence the symbol_table_.numSymbols() + // will be greater than zero at this point. 
+ if (!are_stats_marked_for_deletion_) { + EXPECT_EQ(0, symbol_table_.numSymbols()); + } } SymbolTableImpl symbol_table_; AllocatorImpl alloc_; StatNamePool pool_; + bool are_stats_marked_for_deletion_ = false; }; // Allocate 2 counters of the same name, and you'll get the same object. @@ -125,6 +133,283 @@ TEST_F(AllocatorImplTest, RefCountDecAllocRaceSynchronized) { EXPECT_FALSE(alloc_.isMutexLockedForTest()); } +TEST_F(AllocatorImplTest, ForEachCounter) { + StatNameHashSet stat_names; + std::vector counters; + + const size_t num_stats = 11; + + for (size_t idx = 0; idx < num_stats; ++idx) { + auto stat_name = makeStat(absl::StrCat("counter.", idx)); + stat_names.insert(stat_name); + counters.emplace_back(alloc_.makeCounter(stat_name, StatName(), {})); + } + + size_t num_counters = 0; + size_t num_iterations = 0; + alloc_.forEachCounter([&num_counters](std::size_t size) { num_counters = size; }, + [&num_iterations, &stat_names](Stats::Counter& counter) { + EXPECT_EQ(stat_names.count(counter.statName()), 1); + ++num_iterations; + }); + EXPECT_EQ(num_counters, 11); + EXPECT_EQ(num_iterations, 11); + + // Reject a stat and remove it from "scope". + StatName rejected_stat_name = counters[4]->statName(); + alloc_.markCounterForDeletion(counters[4]); + are_stats_marked_for_deletion_ = true; + // Save a local reference to rejected stat. + Counter& rejected_counter = *counters[4]; + counters.erase(counters.begin() + 4); + + // Verify that the rejected stat does not show up during iteration. + num_iterations = 0; + num_counters = 0; + alloc_.forEachCounter([&num_counters](std::size_t size) { num_counters = size; }, + [&num_iterations, &rejected_stat_name](Stats::Counter& counter) { + EXPECT_THAT(counter.statName(), ::testing::Ne(rejected_stat_name)); + ++num_iterations; + }); + EXPECT_EQ(num_iterations, 10); + EXPECT_EQ(num_counters, 10); + + // Verify that we can access the local reference without a crash. + rejected_counter.inc(); + + // Erase all stats. 
+ counters.clear(); + num_iterations = 0; + alloc_.forEachCounter([&num_counters](std::size_t size) { num_counters = size; }, + [&num_iterations](Stats::Counter&) { ++num_iterations; }); + EXPECT_EQ(num_counters, 0); + EXPECT_EQ(num_iterations, 0); +} + +TEST_F(AllocatorImplTest, ForEachGauge) { + StatNameHashSet stat_names; + std::vector gauges; + + const size_t num_stats = 11; + + for (size_t idx = 0; idx < num_stats; ++idx) { + auto stat_name = makeStat(absl::StrCat("gauge.", idx)); + stat_names.insert(stat_name); + gauges.emplace_back(alloc_.makeGauge(stat_name, StatName(), {}, Gauge::ImportMode::Accumulate)); + } + + size_t num_gauges = 0; + size_t num_iterations = 0; + alloc_.forEachGauge([&num_gauges](std::size_t size) { num_gauges = size; }, + [&num_iterations, &stat_names](Stats::Gauge& gauge) { + EXPECT_EQ(stat_names.count(gauge.statName()), 1); + ++num_iterations; + }); + EXPECT_EQ(num_gauges, 11); + EXPECT_EQ(num_iterations, 11); + + // Reject a stat and remove it from "scope". + StatName rejected_stat_name = gauges[3]->statName(); + alloc_.markGaugeForDeletion(gauges[3]); + are_stats_marked_for_deletion_ = true; + // Save a local reference to rejected stat. + Gauge& rejected_gauge = *gauges[3]; + gauges.erase(gauges.begin() + 3); + + // Verify that the rejected stat does not show up during iteration. + num_iterations = 0; + num_gauges = 0; + alloc_.forEachGauge([&num_gauges](std::size_t size) { num_gauges = size; }, + [&num_iterations, &rejected_stat_name](Stats::Gauge& gauge) { + EXPECT_THAT(gauge.statName(), ::testing::Ne(rejected_stat_name)); + ++num_iterations; + }); + EXPECT_EQ(num_iterations, 10); + EXPECT_EQ(num_gauges, 10); + + // Verify that we can access the local reference without a crash. + rejected_gauge.inc(); + + // Erase all stats. 
+ gauges.clear(); + num_iterations = 0; + alloc_.forEachGauge([&num_gauges](std::size_t size) { num_gauges = size; }, + [&num_iterations](Stats::Gauge&) { ++num_iterations; }); + EXPECT_EQ(num_gauges, 0); + EXPECT_EQ(num_iterations, 0); +} + +TEST_F(AllocatorImplTest, ForEachTextReadout) { + StatNameHashSet stat_names; + std::vector text_readouts; + + const size_t num_stats = 11; + + for (size_t idx = 0; idx < num_stats; ++idx) { + auto stat_name = makeStat(absl::StrCat("text_readout.", idx)); + stat_names.insert(stat_name); + text_readouts.emplace_back(alloc_.makeTextReadout(stat_name, StatName(), {})); + } + + size_t num_text_readouts = 0; + size_t num_iterations = 0; + alloc_.forEachTextReadout([&num_text_readouts](std::size_t size) { num_text_readouts = size; }, + [&num_iterations, &stat_names](Stats::TextReadout& text_readout) { + EXPECT_EQ(stat_names.count(text_readout.statName()), 1); + ++num_iterations; + }); + EXPECT_EQ(num_text_readouts, 11); + EXPECT_EQ(num_iterations, 11); + + // Reject a stat and remove it from "scope". + StatName rejected_stat_name = text_readouts[4]->statName(); + alloc_.markTextReadoutForDeletion(text_readouts[4]); + are_stats_marked_for_deletion_ = true; + // Save a local reference to rejected stat. + TextReadout& rejected_text_readout = *text_readouts[4]; + text_readouts.erase(text_readouts.begin() + 4); + + // Verify that the rejected stat does not show up during iteration. + num_iterations = 0; + num_text_readouts = 0; + alloc_.forEachTextReadout( + [&num_text_readouts](std::size_t size) { num_text_readouts = size; }, + [&num_iterations, &rejected_stat_name](Stats::TextReadout& text_readout) { + EXPECT_THAT(text_readout.statName(), ::testing::Ne(rejected_stat_name)); + ++num_iterations; + }); + EXPECT_EQ(num_iterations, 10); + EXPECT_EQ(num_text_readouts, 10); + + // Verify that we can access the local reference without a crash. + rejected_text_readout.set("no crash"); + + // Erase all stats. 
+ text_readouts.clear(); + num_iterations = 0; + alloc_.forEachTextReadout([&num_text_readouts](std::size_t size) { num_text_readouts = size; }, + [&num_iterations](Stats::TextReadout&) { ++num_iterations; }); + EXPECT_EQ(num_text_readouts, 0); + EXPECT_EQ(num_iterations, 0); +} + +// Verify that we don't crash if a nullptr is passed in for the size lambda for +// the for each stat methods. +TEST_F(AllocatorImplTest, ForEachWithNullSizeLambda) { + std::vector counters; + std::vector text_readouts; + std::vector gauges; + + const size_t num_stats = 3; + + // For each counter. + for (size_t idx = 0; idx < num_stats; ++idx) { + auto stat_name = makeStat(absl::StrCat("counter.", idx)); + counters.emplace_back(alloc_.makeCounter(stat_name, StatName(), {})); + } + size_t num_iterations = 0; + alloc_.forEachCounter(nullptr, [&num_iterations](Stats::Counter& counter) { + (void)counter; + ++num_iterations; + }); + EXPECT_EQ(num_iterations, num_stats); + + // For each gauge. + for (size_t idx = 0; idx < num_stats; ++idx) { + auto stat_name = makeStat(absl::StrCat("gauge.", idx)); + gauges.emplace_back(alloc_.makeGauge(stat_name, StatName(), {}, Gauge::ImportMode::Accumulate)); + } + num_iterations = 0; + alloc_.forEachGauge(nullptr, [&num_iterations](Stats::Gauge& gauge) { + (void)gauge; + ++num_iterations; + }); + EXPECT_EQ(num_iterations, num_stats); + + // For each text readout. + for (size_t idx = 0; idx < num_stats; ++idx) { + auto stat_name = makeStat(absl::StrCat("text_readout.", idx)); + text_readouts.emplace_back(alloc_.makeTextReadout(stat_name, StatName(), {})); + } + num_iterations = 0; + alloc_.forEachTextReadout(nullptr, [&num_iterations](Stats::TextReadout& text_readout) { + (void)text_readout; + ++num_iterations; + }); + EXPECT_EQ(num_iterations, num_stats); +} + +// Currently, if we ask for a stat from the Allocator that has already been +// marked for deletion (i.e. rejected) we get a new stat with the same name. +// This test documents this behavior. 
+TEST_F(AllocatorImplTest, AskForDeletedStat) { + const size_t num_stats = 10; + are_stats_marked_for_deletion_ = true; + + std::vector counters; + for (size_t idx = 0; idx < num_stats; ++idx) { + auto stat_name = makeStat(absl::StrCat("counter.", idx)); + counters.emplace_back(alloc_.makeCounter(stat_name, StatName(), {})); + } + // Reject a stat and remove it from "scope". + StatName const rejected_counter_name = counters[4]->statName(); + alloc_.markCounterForDeletion(counters[4]); + // Save a local reference to rejected stat. + Counter& rejected_counter = *counters[4]; + counters.erase(counters.begin() + 4); + + rejected_counter.inc(); + rejected_counter.inc(); + + // Make the deleted stat again. + CounterSharedPtr deleted_counter = alloc_.makeCounter(rejected_counter_name, StatName(), {}); + + EXPECT_EQ(deleted_counter->value(), 0); + EXPECT_EQ(rejected_counter.value(), 2); + + std::vector gauges; + for (size_t idx = 0; idx < num_stats; ++idx) { + auto stat_name = makeStat(absl::StrCat("gauge.", idx)); + gauges.emplace_back(alloc_.makeGauge(stat_name, StatName(), {}, Gauge::ImportMode::Accumulate)); + } + // Reject a stat and remove it from "scope". + StatName const rejected_gauge_name = gauges[4]->statName(); + alloc_.markGaugeForDeletion(gauges[4]); + // Save a local reference to rejected stat. + Gauge& rejected_gauge = *gauges[4]; + gauges.erase(gauges.begin() + 4); + + rejected_gauge.set(10); + + // Make the deleted stat again. + GaugeSharedPtr deleted_gauge = + alloc_.makeGauge(rejected_gauge_name, StatName(), {}, Gauge::ImportMode::Accumulate); + + EXPECT_EQ(deleted_gauge->value(), 0); + EXPECT_EQ(rejected_gauge.value(), 10); + + std::vector text_readouts; + for (size_t idx = 0; idx < num_stats; ++idx) { + auto stat_name = makeStat(absl::StrCat("text_readout.", idx)); + text_readouts.emplace_back(alloc_.makeTextReadout(stat_name, StatName(), {})); + } + // Reject a stat and remove it from "scope". 
+ StatName const rejected_text_readout_name = text_readouts[4]->statName(); + alloc_.markTextReadoutForDeletion(text_readouts[4]); + // Save a local reference to rejected stat. + TextReadout& rejected_text_readout = *text_readouts[4]; + text_readouts.erase(text_readouts.begin() + 4); + + rejected_text_readout.set("deleted value"); + + // Make the deleted stat again. + TextReadoutSharedPtr deleted_text_readout = + alloc_.makeTextReadout(rejected_text_readout_name, StatName(), {}); + + EXPECT_EQ(deleted_text_readout->value(), ""); + EXPECT_EQ(rejected_text_readout.value(), "deleted value"); +} + } // namespace } // namespace Stats } // namespace Envoy diff --git a/test/common/stats/thread_local_store_test.cc b/test/common/stats/thread_local_store_test.cc index b723fe3083fb9..5bf5e67395437 100644 --- a/test/common/stats/thread_local_store_test.cc +++ b/test/common/stats/thread_local_store_test.cc @@ -508,10 +508,15 @@ TEST_F(StatsThreadLocalStoreTest, ScopeDelete) { EXPECT_CALL(main_thread_dispatcher_, post(_)); EXPECT_CALL(tls_, runOnAllThreads(_, _)).Times(testing::AtLeast(1)); scope1.reset(); - EXPECT_EQ(0UL, store_->counters().size()); + // The counter is gone from all scopes, but is still held in the local + // variable c1. Hence, it will not be removed from the allocator or store. + EXPECT_EQ(1UL, store_->counters().size()); EXPECT_EQ(1L, c1.use_count()); c1.reset(); + // Removing the counter from the local variable, should now remove it from the + // allocator. + EXPECT_EQ(0UL, store_->counters().size()); tls_.shutdownGlobalThreading(); store_->shutdownThreading(); @@ -1192,6 +1197,48 @@ TEST_F(StatsThreadLocalStoreTest, RemoveRejectedStats) { tls_.shutdownThread(); } +// Verify that asking for deleted stats by name does not create new copies on +// the allocator. 
+TEST_F(StatsThreadLocalStoreTest, AskForRejectedStat) { + store_->initializeThreading(main_thread_dispatcher_, tls_); + Counter& counter = store_->counterFromString("c1"); + Gauge& gauge = store_->gaugeFromString("g1", Gauge::ImportMode::Accumulate); + TextReadout& text_readout = store_->textReadoutFromString("t1"); + ASSERT_EQ(1, store_->counters().size()); // "c1". + ASSERT_EQ(1, store_->gauges().size()); + ASSERT_EQ(1, store_->textReadouts().size()); + + // Will effectively block all stats, and remove all the non-matching stats. + envoy::config::metrics::v3::StatsConfig stats_config; + stats_config.mutable_stats_matcher()->mutable_inclusion_list()->add_patterns()->set_exact( + "no-such-stat"); + store_->setStatsMatcher(std::make_unique(stats_config, symbol_table_)); + + // They can no longer be found. + EXPECT_EQ(0, store_->counters().size()); + EXPECT_EQ(0, store_->gauges().size()); + EXPECT_EQ(0, store_->textReadouts().size()); + + // Ask for the rejected stats again by name. + Counter& counter2 = store_->counterFromString("c1"); + Gauge& gauge2 = store_->gaugeFromString("g1", Gauge::ImportMode::Accumulate); + TextReadout& text_readout2 = store_->textReadoutFromString("t1"); + + // Verify we got the same stats. + EXPECT_EQ(&counter, &counter2); + EXPECT_EQ(&gauge, &gauge2); + EXPECT_EQ(&text_readout, &text_readout2); + + // Verify that new stats were not created. 
+ EXPECT_EQ(0, store_->counters().size()); + EXPECT_EQ(0, store_->gauges().size()); + EXPECT_EQ(0, store_->textReadouts().size()); + + tls_.shutdownGlobalThreading(); + store_->shutdownThreading(); + tls_.shutdownThread(); +} + TEST_F(StatsThreadLocalStoreTest, NonHotRestartNoTruncation) { InSequence s; store_->initializeThreading(main_thread_dispatcher_, tls_); @@ -1557,9 +1604,8 @@ class ThreadLocalRealThreadsTestBase : public ThreadLocalStoreNoMocksTestBase { }; ThreadLocalRealThreadsTestBase(uint32_t num_threads) - : num_threads_(num_threads), start_time_(time_system_.monotonicTime()), - api_(Api::createApiForTest()), thread_factory_(api_->threadFactory()), - pool_(store_->symbolTable()) { + : num_threads_(num_threads), api_(Api::createApiForTest()), + thread_factory_(api_->threadFactory()), pool_(store_->symbolTable()) { // This is the same order as InstanceImpl::initialize in source/server/server.cc. thread_dispatchers_.resize(num_threads_); { @@ -1643,8 +1689,6 @@ class ThreadLocalRealThreadsTestBase : public ThreadLocalStoreNoMocksTestBase { } const uint32_t num_threads_; - Event::TestRealTimeSystem time_system_; - MonotonicTime start_time_; Api::ApiPtr api_; Event::DispatcherPtr main_dispatcher_; std::vector thread_dispatchers_; @@ -1661,7 +1705,8 @@ class ClusterShutdownCleanupStarvationTest : public ThreadLocalRealThreadsTestBa ClusterShutdownCleanupStarvationTest() : ThreadLocalRealThreadsTestBase(NumThreads), my_counter_name_(pool_.add("my_counter")), - my_counter_scoped_name_(pool_.add("scope.my_counter")) {} + my_counter_scoped_name_(pool_.add("scope.my_counter")), + start_time_(time_system_.monotonicTime()) {} void createScopesIncCountersAndCleanup() { for (uint32_t i = 0; i < NumScopes; ++i) { @@ -1684,8 +1729,10 @@ class ClusterShutdownCleanupStarvationTest : public ThreadLocalRealThreadsTestBa start_time_); } + Event::TestRealTimeSystem time_system_; StatName my_counter_name_; StatName my_counter_scoped_name_; + MonotonicTime start_time_; }; // 
Tests the scenario where a cluster and stat are allocated in multiple diff --git a/test/common/stream_info/stream_info_impl_test.cc b/test/common/stream_info/stream_info_impl_test.cc index 6ff19887f4fc7..cfe761f58f3a0 100644 --- a/test/common/stream_info/stream_info_impl_test.cc +++ b/test/common/stream_info/stream_info_impl_test.cc @@ -166,10 +166,11 @@ TEST_F(StreamInfoImplTest, MiscSettersAndGetters) { stream_info.healthCheck(true); EXPECT_TRUE(stream_info.healthCheck()); - EXPECT_EQ(nullptr, stream_info.routeEntry()); - NiceMock route_entry; - stream_info.route_entry_ = &route_entry; - EXPECT_EQ(&route_entry, stream_info.routeEntry()); + EXPECT_EQ(nullptr, stream_info.route()); + std::shared_ptr> route = + std::make_shared>(); + stream_info.route_ = route; + EXPECT_EQ(route, stream_info.route()); stream_info.filterState()->setData("test", std::make_unique(1), FilterState::StateType::ReadOnly, @@ -192,6 +193,11 @@ TEST_F(StreamInfoImplTest, MiscSettersAndGetters) { EXPECT_CALL(*ssl_info, sessionId()).WillRepeatedly(testing::ReturnRef(session_id)); stream_info.setUpstreamSslConnection(ssl_info); EXPECT_EQ(session_id, stream_info.upstreamSslConnection()->sessionId()); + + EXPECT_FALSE(stream_info.upstreamConnectionId().has_value()); + stream_info.setUpstreamConnectionId(12345); + ASSERT_TRUE(stream_info.upstreamConnectionId().has_value()); + EXPECT_EQ(12345, stream_info.upstreamConnectionId().value()); } } diff --git a/test/common/stream_info/test_util.h b/test/common/stream_info/test_util.h index a4ab278e8db3e..8026bde7de900 100644 --- a/test/common/stream_info/test_util.h +++ b/test/common/stream_info/test_util.h @@ -75,16 +75,8 @@ class TestStreamInfo : public StreamInfo::StreamInfo { } bool healthCheck() const override { return health_check_request_; } void healthCheck(bool is_health_check) override { health_check_request_ = is_health_check; } - const Network::SocketAddressSetter& downstreamAddressProvider() const override { - return 
*downstream_address_provider_; - } - void - setDownstreamSslConnection(const Ssl::ConnectionInfoConstSharedPtr& connection_info) override { - downstream_connection_info_ = connection_info; - } - - Ssl::ConnectionInfoConstSharedPtr downstreamSslConnection() const override { - return downstream_connection_info_; + const Network::ConnectionInfoSetter& downstreamAddressProvider() const override { + return *downstream_connection_info_provider_; } void setUpstreamSslConnection(const Ssl::ConnectionInfoConstSharedPtr& connection_info) override { @@ -99,7 +91,7 @@ class TestStreamInfo : public StreamInfo::StreamInfo { } const std::string& getRouteName() const override { return route_name_; } - const Router::RouteEntry* routeEntry() const override { return route_entry_; } + Router::RouteConstSharedPtr route() const override { return route_; } absl::optional duration(const absl::optional& time) const { @@ -219,6 +211,14 @@ class TestStreamInfo : public StreamInfo::StreamInfo { const std::string& filterChainName() const override { return filter_chain_name_; } + void setUpstreamConnectionId(uint64_t id) override { upstream_connection_id_ = id; } + + absl::optional upstreamConnectionId() const override { return upstream_connection_id_; } + + void setAttemptCount(uint32_t attempt_count) override { attempt_count_ = attempt_count; } + + absl::optional attemptCount() const override { return attempt_count_; } + Random::RandomGeneratorImpl random_; SystemTime start_time_; MonotonicTime start_time_monotonic_; @@ -241,11 +241,11 @@ class TestStreamInfo : public StreamInfo::StreamInfo { bool health_check_request_{}; std::string route_name_; Network::Address::InstanceConstSharedPtr upstream_local_address_; - Network::SocketAddressSetterSharedPtr downstream_address_provider_{ - std::make_shared(nullptr, nullptr)}; + Network::ConnectionInfoSetterSharedPtr downstream_connection_info_provider_{ + std::make_shared(nullptr, nullptr)}; Ssl::ConnectionInfoConstSharedPtr 
downstream_connection_info_; Ssl::ConnectionInfoConstSharedPtr upstream_connection_info_; - const Router::RouteEntry* route_entry_{}; + Router::RouteConstSharedPtr route_; envoy::config::core::v3::Metadata metadata_{}; Envoy::StreamInfo::FilterStateSharedPtr filter_state_{ std::make_shared( @@ -258,9 +258,10 @@ class TestStreamInfo : public StreamInfo::StreamInfo { Envoy::Event::SimulatedTimeSystem test_time_; absl::optional upstream_cluster_info_{}; Http::RequestIdStreamInfoProviderSharedPtr request_id_provider_; - absl::optional connection_id_; std::string filter_chain_name_; Tracing::Reason trace_reason_{Tracing::Reason::NotTraceable}; + absl::optional upstream_connection_id_; + absl::optional attempt_count_; }; } // namespace Envoy diff --git a/test/common/tcp/conn_pool_test.cc b/test/common/tcp/conn_pool_test.cc index 8657f444a4834..de4f489e05f46 100644 --- a/test/common/tcp/conn_pool_test.cc +++ b/test/common/tcp/conn_pool_test.cc @@ -57,7 +57,7 @@ struct ConnPoolCallbacks : public Tcp::ConnectionPool::Callbacks { conn_data_ = std::move(conn); conn_data_->addUpstreamCallbacks(callbacks_); host_ = host; - ssl_ = conn_data_->connection().streamInfo().downstreamSslConnection(); + ssl_ = conn_data_->connection().streamInfo().downstreamAddressProvider().sslConnection(); pool_ready_.ready(); } @@ -327,7 +327,7 @@ class TcpConnPoolImplDestructorTest : public Event::TestUsingSimulatedTime, EXPECT_CALL(*connection_, connect()); EXPECT_CALL(*connection_, setConnectionStats(_)); EXPECT_CALL(*connection_, noDelay(true)); - EXPECT_CALL(*connection_, streamInfo()).Times(3); + EXPECT_CALL(*connection_, streamInfo()); EXPECT_CALL(*connection_, id()).Times(AnyNumber()); EXPECT_CALL(*connection_, readDisable(_)).Times(AnyNumber()); @@ -341,10 +341,8 @@ class TcpConnPoolImplDestructorTest : public Event::TestUsingSimulatedTime, EXPECT_CALL(*connect_timer_, disableTimer()); EXPECT_CALL(callbacks_->pool_ready_, ready()); - EXPECT_CALL(*connection_, ssl()).WillOnce(Return(ssl_)); 
connection_->raiseEvent(Network::ConnectionEvent::Connected); - EXPECT_EQ(connection_->streamInfo().downstreamSslConnection(), ssl_); - EXPECT_EQ(callbacks_->ssl_, ssl_); + connection_->stream_info_.downstream_connection_info_provider_->setSslConnection(ssl_); } bool test_new_connection_pool_; diff --git a/test/common/tcp_proxy/config_test.cc b/test/common/tcp_proxy/config_test.cc index d5c49016465c9..891043958be7f 100644 --- a/test/common/tcp_proxy/config_test.cc +++ b/test/common/tcp_proxy/config_test.cc @@ -11,7 +11,7 @@ cluster: foo )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); + Config config_obj(constructConfigFromYaml(yaml, factory_context)); EXPECT_EQ(std::chrono::hours(1), config_obj.sharedConfig()->idleTimeout().value()); } @@ -23,7 +23,7 @@ idle_timeout: 0s )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); + Config config_obj(constructConfigFromYaml(yaml, factory_context)); EXPECT_FALSE(config_obj.sharedConfig()->idleTimeout().has_value()); } @@ -35,7 +35,7 @@ idle_timeout: 1s )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); + Config config_obj(constructConfigFromYaml(yaml, factory_context)); EXPECT_EQ(std::chrono::seconds(1), config_obj.sharedConfig()->idleTimeout().value()); } @@ -47,7 +47,7 @@ max_downstream_connection_duration: 10s )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); + Config config_obj(constructConfigFromYaml(yaml, factory_context)); EXPECT_EQ(std::chrono::seconds(10), config_obj.maxDownstreamConnectionDuration().value()); } @@ -60,311 +60,6 @@ TEST(ConfigTest, NoRouteConfig) { EXPECT_THROW(constructConfigFromYaml(yaml, factory_context), EnvoyException); } -TEST(ConfigTest, DEPRECATED_FEATURE_TEST(BadConfig)) { - const std::string yaml_string = R"EOF( - stat_prefix: 1 - cluster: cluster - deprecated_v1: - routes: - - 
cluster: fake_cluster - )EOF"; - - NiceMock factory_context; - EXPECT_THROW(constructConfigFromYaml(yaml_string, factory_context, false), EnvoyException); -} - -TEST(ConfigTest, DEPRECATED_FEATURE_TEST(EmptyRouteConfig)) { - const std::string yaml = R"EOF( - stat_prefix: name - cluster: cluster - deprecated_v1: - routes: [] - )EOF"; - - NiceMock factory_context_; - EXPECT_THROW(constructConfigFromYaml(yaml, factory_context_, false), EnvoyException); -} - -TEST(ConfigTest, DEPRECATED_FEATURE_TEST(Routes)) { - TestDeprecatedV2Api _deprecated_v2_api; - const std::string yaml = R"EOF( - stat_prefix: name - cluster: cluster - deprecated_v1: - routes: - - destination_ip_list: - - address_prefix: 10.10.10.10 - prefix_len: 32 - - address_prefix: 10.10.11.0 - prefix_len: 24 - - address_prefix: 10.11.0.0 - prefix_len: 16 - - address_prefix: 11.0.0.0 - prefix_len: 8 - - address_prefix: 128.0.0.0 - prefix_len: 1 - cluster: with_destination_ip_list - - destination_ip_list: - - address_prefix: "::1" - prefix_len: 128 - - address_prefix: "2001:abcd::" - prefix_len: 64 - cluster: with_v6_destination - - destination_ports: 1-1024,2048-4096,12345 - cluster: with_destination_ports - - source_ports: '23457,23459' - cluster: with_source_ports - - destination_ip_list: - - address_prefix: "2002::" - prefix_len: 32 - source_ip_list: - - address_prefix: "2003::" - prefix_len: 64 - cluster: with_v6_source_and_destination - - destination_ip_list: - - address_prefix: 10.0.0.0 - prefix_len: 24 - source_ip_list: - - address_prefix: 20.0.0.0 - prefix_len: 24 - destination_ports: '10000' - source_ports: '20000' - cluster: with_everything - - cluster: catch_all - )EOF"; - - NiceMock factory_context_; - Config config_obj(constructConfigFromYaml(yaml, factory_context_, false)); - - { - // hit route with destination_ip (10.10.10.10/32) - NiceMock connection; - connection.stream_info_.downstream_address_provider_->setLocalAddress( - std::make_shared("10.10.10.10")); - 
EXPECT_EQ(std::string("with_destination_ip_list"), - config_obj.getRouteFromEntries(connection)->clusterName()); - } - - { - // fall-through - NiceMock connection; - connection.stream_info_.downstream_address_provider_->setLocalAddress( - std::make_shared("10.10.10.11")); - connection.stream_info_.downstream_address_provider_->setRemoteAddress( - std::make_shared("0.0.0.0")); - EXPECT_EQ(std::string("catch_all"), config_obj.getRouteFromEntries(connection)->clusterName()); - } - - { - // hit route with destination_ip (10.10.11.0/24) - NiceMock connection; - connection.stream_info_.downstream_address_provider_->setLocalAddress( - std::make_shared("10.10.11.11")); - EXPECT_EQ(std::string("with_destination_ip_list"), - config_obj.getRouteFromEntries(connection)->clusterName()); - } - - { - // fall-through - NiceMock connection; - connection.stream_info_.downstream_address_provider_->setLocalAddress( - std::make_shared("10.10.12.12")); - connection.stream_info_.downstream_address_provider_->setRemoteAddress( - std::make_shared("0.0.0.0")); - EXPECT_EQ(std::string("catch_all"), config_obj.getRouteFromEntries(connection)->clusterName()); - } - - { - // hit route with destination_ip (10.11.0.0/16) - NiceMock connection; - connection.stream_info_.downstream_address_provider_->setLocalAddress( - std::make_shared("10.11.11.11")); - EXPECT_EQ(std::string("with_destination_ip_list"), - config_obj.getRouteFromEntries(connection)->clusterName()); - } - - { - // fall-through - NiceMock connection; - connection.stream_info_.downstream_address_provider_->setLocalAddress( - std::make_shared("10.12.12.12")); - connection.stream_info_.downstream_address_provider_->setRemoteAddress( - std::make_shared("0.0.0.0")); - EXPECT_EQ(std::string("catch_all"), config_obj.getRouteFromEntries(connection)->clusterName()); - } - - { - // hit route with destination_ip (11.0.0.0/8) - NiceMock connection; - connection.stream_info_.downstream_address_provider_->setLocalAddress( - 
std::make_shared("11.11.11.11")); - EXPECT_EQ(std::string("with_destination_ip_list"), - config_obj.getRouteFromEntries(connection)->clusterName()); - } - - { - // fall-through - NiceMock connection; - connection.stream_info_.downstream_address_provider_->setLocalAddress( - std::make_shared("12.12.12.12")); - connection.stream_info_.downstream_address_provider_->setRemoteAddress( - std::make_shared("0.0.0.0")); - EXPECT_EQ(std::string("catch_all"), config_obj.getRouteFromEntries(connection)->clusterName()); - } - - { - // hit route with destination_ip (128.0.0.0/8) - NiceMock connection; - connection.stream_info_.downstream_address_provider_->setLocalAddress( - std::make_shared("128.255.255.255")); - EXPECT_EQ(std::string("with_destination_ip_list"), - config_obj.getRouteFromEntries(connection)->clusterName()); - } - - { - // hit route with destination port range - NiceMock connection; - connection.stream_info_.downstream_address_provider_->setLocalAddress( - std::make_shared("1.2.3.4", 12345)); - EXPECT_EQ(std::string("with_destination_ports"), - config_obj.getRouteFromEntries(connection)->clusterName()); - } - - { - // fall through - NiceMock connection; - connection.stream_info_.downstream_address_provider_->setLocalAddress( - std::make_shared("1.2.3.4", 23456)); - connection.stream_info_.downstream_address_provider_->setRemoteAddress( - std::make_shared("0.0.0.0")); - EXPECT_EQ(std::string("catch_all"), config_obj.getRouteFromEntries(connection)->clusterName()); - } - - { - // hit route with source port range - NiceMock connection; - connection.stream_info_.downstream_address_provider_->setLocalAddress( - std::make_shared("1.2.3.4", 23456)); - connection.stream_info_.downstream_address_provider_->setRemoteAddress( - std::make_shared("0.0.0.0", 23459)); - EXPECT_EQ(std::string("with_source_ports"), - config_obj.getRouteFromEntries(connection)->clusterName()); - } - - { - // fall through - NiceMock connection; - 
connection.stream_info_.downstream_address_provider_->setLocalAddress( - std::make_shared("1.2.3.4", 23456)); - connection.stream_info_.downstream_address_provider_->setRemoteAddress( - std::make_shared("0.0.0.0", 23458)); - EXPECT_EQ(std::string("catch_all"), config_obj.getRouteFromEntries(connection)->clusterName()); - } - - { - // hit the route with all criteria present - NiceMock connection; - connection.stream_info_.downstream_address_provider_->setLocalAddress( - std::make_shared("10.0.0.0", 10000)); - connection.stream_info_.downstream_address_provider_->setRemoteAddress( - std::make_shared("20.0.0.0", 20000)); - EXPECT_EQ(std::string("with_everything"), - config_obj.getRouteFromEntries(connection)->clusterName()); - } - - { - // fall through - NiceMock connection; - connection.stream_info_.downstream_address_provider_->setLocalAddress( - std::make_shared("10.0.0.0", 10000)); - connection.stream_info_.downstream_address_provider_->setRemoteAddress( - std::make_shared("30.0.0.0", 20000)); - EXPECT_EQ(std::string("catch_all"), config_obj.getRouteFromEntries(connection)->clusterName()); - } - - { - // hit route with destination_ip (::1/128) - NiceMock connection; - connection.stream_info_.downstream_address_provider_->setLocalAddress( - std::make_shared("::1")); - EXPECT_EQ(std::string("with_v6_destination"), - config_obj.getRouteFromEntries(connection)->clusterName()); - } - - { - // hit route with destination_ip ("2001:abcd/64") - NiceMock connection; - connection.stream_info_.downstream_address_provider_->setLocalAddress( - std::make_shared("2001:abcd:0:0:1::")); - EXPECT_EQ(std::string("with_v6_destination"), - config_obj.getRouteFromEntries(connection)->clusterName()); - } - - { - // hit route with destination_ip ("2002::/32") and source_ip ("2003::/64") - NiceMock connection; - connection.stream_info_.downstream_address_provider_->setLocalAddress( - std::make_shared("2002:0:0:0:0:0::1")); - 
connection.stream_info_.downstream_address_provider_->setRemoteAddress( - std::make_shared("2003:0:0:0:0::5")); - EXPECT_EQ(std::string("with_v6_source_and_destination"), - config_obj.getRouteFromEntries(connection)->clusterName()); - } - - { - // fall through - NiceMock connection; - connection.stream_info_.downstream_address_provider_->setLocalAddress( - std::make_shared("2004::")); - connection.stream_info_.downstream_address_provider_->setRemoteAddress( - std::make_shared("::")); - EXPECT_EQ(std::string("catch_all"), config_obj.getRouteFromEntries(connection)->clusterName()); - } -} - -// Tests that a deprecated_v1 route gets the top-level endpoint selector. -TEST(ConfigTest, DEPRECATED_FEATURE_TEST(RouteWithTopLevelMetadataMatchConfig)) { - TestDeprecatedV2Api _deprecated_v2_api; - const std::string yaml = R"EOF( - stat_prefix: name - cluster: cluster - deprecated_v1: - routes: - - cluster: catch_all - metadata_match: - filter_metadata: - envoy.lb: - k1: v1 - k2: v2 -)EOF"; - - NiceMock factory_context_; - Config config_obj(constructConfigFromYaml(yaml, factory_context_, false)); - - ProtobufWkt::Value v1, v2; - v1.set_string_value("v1"); - v2.set_string_value("v2"); - HashedValue hv1(v1), hv2(v2); - - NiceMock connection; - const auto route = config_obj.getRouteFromEntries(connection); - EXPECT_NE(nullptr, route); - - EXPECT_EQ("catch_all", route->clusterName()); - - const auto* criteria = route->metadataMatchCriteria(); - EXPECT_NE(nullptr, criteria); - - const auto& criterions = criteria->metadataMatchCriteria(); - EXPECT_EQ(2, criterions.size()); - - EXPECT_EQ("k1", criterions[0]->name()); - EXPECT_EQ(hv1, criterions[0]->value()); - - EXPECT_EQ("k2", criterions[1]->name()); - EXPECT_EQ(hv2, criterions[1]->value()); -} - // Tests that it's not possible to define a weighted cluster with 0 weight. 
TEST(ConfigTest, WeightedClusterWithZeroWeightConfig) { const std::string yaml = R"EOF( @@ -377,7 +72,7 @@ TEST(ConfigTest, WeightedClusterWithZeroWeightConfig) { )EOF"; NiceMock factory_context; - EXPECT_THROW(constructConfigFromV3Yaml(yaml, factory_context), EnvoyException); + EXPECT_THROW(constructConfigFromYaml(yaml, factory_context), EnvoyException); } // Tests that it is possible to define a list of weighted clusters. @@ -393,7 +88,7 @@ TEST(ConfigTest, WeightedClustersConfig) { )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); + Config config_obj(constructConfigFromYaml(yaml, factory_context)); NiceMock connection; EXPECT_CALL(factory_context.api_.random_, random()).WillOnce(Return(0)); @@ -427,7 +122,7 @@ TEST(ConfigTest, WeightedClustersWithMetadataMatchConfig) { )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); + Config config_obj(constructConfigFromYaml(yaml, factory_context)); { ProtobufWkt::Value v1, v2; @@ -514,7 +209,7 @@ TEST(ConfigTest, WeightedClustersWithMetadataMatchAndTopLevelMetadataMatchConfig )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); + Config config_obj(constructConfigFromYaml(yaml, factory_context)); ProtobufWkt::Value v00, v01, v04; v00.set_string_value("v00"); @@ -605,7 +300,7 @@ TEST(ConfigTest, WeightedClustersWithTopLevelMetadataMatchConfig) { )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); + Config config_obj(constructConfigFromYaml(yaml, factory_context)); ProtobufWkt::Value v1, v2; v1.set_string_value("v1"); @@ -644,7 +339,7 @@ TEST(ConfigTest, TopLevelMetadataMatchConfig) { )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); + Config config_obj(constructConfigFromYaml(yaml, factory_context)); ProtobufWkt::Value v1, v2; v1.set_string_value("v1"); @@ -677,7 
+372,7 @@ TEST(ConfigTest, ClusterWithTopLevelMetadataMatchConfig) { )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); + Config config_obj(constructConfigFromYaml(yaml, factory_context)); ProtobufWkt::Value v1, v2; v1.set_string_value("v1"); @@ -716,7 +411,7 @@ TEST(ConfigTest, PerConnectionClusterWithTopLevelMetadataMatchConfig) { )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); + Config config_obj(constructConfigFromYaml(yaml, factory_context)); ProtobufWkt::Value v1, v2; v1.set_string_value("v1"); @@ -755,7 +450,7 @@ TEST(ConfigTest, HashWithSourceIpConfig) { )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); + Config config_obj(constructConfigFromYaml(yaml, factory_context)); EXPECT_NE(nullptr, config_obj.hashPolicy()); } @@ -766,7 +461,7 @@ TEST(ConfigTest, HashWithSourceIpDefaultConfig) { )EOF"; NiceMock factory_context; - Config config_obj(constructConfigFromV3Yaml(yaml, factory_context)); + Config config_obj(constructConfigFromYaml(yaml, factory_context)); EXPECT_EQ(nullptr, config_obj.hashPolicy()); } @@ -796,24 +491,16 @@ TEST(ConfigTest, AccessLogConfig) { EXPECT_EQ(2, config_obj.accessLogs().size()); } -class TcpProxyRoutingTest : public testing::Test { +class TcpProxyNonDeprecatedConfigRoutingTest : public testing::Test { public: - TcpProxyRoutingTest() = default; - - void setup(bool avoid_boosting = true) { + void setup() { const std::string yaml = R"EOF( stat_prefix: name - cluster: fallback_cluster - deprecated_v1: - routes: - - destination_ports: 1-9999 - cluster: fake_cluster + cluster: fake_cluster )EOF"; - factory_context_.cluster_manager_.initializeThreadLocalClusters( - {"fallback_cluster", "fake_cluster"}); - config_ = - std::make_shared(constructConfigFromYaml(yaml, factory_context_, avoid_boosting)); + 
factory_context_.cluster_manager_.initializeThreadLocalClusters({"fake_cluster"}); + config_ = std::make_shared(constructConfigFromYaml(yaml, factory_context_)); } void initializeFilter() { @@ -823,8 +510,6 @@ class TcpProxyRoutingTest : public testing::Test { filter_->initializeReadFilterCallbacks(filter_callbacks_); } - Event::TestTimeSystem& timeSystem() { return factory_context_.timeSystem(); } - NiceMock factory_context_; ConfigSharedPtr config_; NiceMock connection_; @@ -832,152 +517,13 @@ class TcpProxyRoutingTest : public testing::Test { std::unique_ptr filter_; }; -TEST_F(TcpProxyRoutingTest, DEPRECATED_FEATURE_TEST(NonRoutableConnection)) { - TestDeprecatedV2Api _deprecated_v2_api; - setup(false); - - const uint32_t total_cx = config_->stats().downstream_cx_total_.value(); - const uint32_t non_routable_cx = config_->stats().downstream_cx_no_route_.value(); - - initializeFilter(); - - // Port 10000 is outside the specified destination port range. - connection_.stream_info_.downstream_address_provider_->setLocalAddress( - std::make_shared("1.2.3.4", 10000)); - - // Expect filter to try to open a connection to the fallback cluster. - EXPECT_CALL(factory_context_.cluster_manager_.thread_local_cluster_, tcpConnPool(_, _)) - .WillOnce(Return(absl::nullopt)); - - filter_->onNewConnection(); - - EXPECT_EQ(total_cx + 1, config_->stats().downstream_cx_total_.value()); - EXPECT_EQ(non_routable_cx, config_->stats().downstream_cx_no_route_.value()); -} - -TEST_F(TcpProxyRoutingTest, DEPRECATED_FEATURE_TEST(RoutableConnection)) { - TestDeprecatedV2Api _deprecated_v2_api; - setup(false); - - const uint32_t total_cx = config_->stats().downstream_cx_total_.value(); - const uint32_t non_routable_cx = config_->stats().downstream_cx_no_route_.value(); - - initializeFilter(); - - // Port 9999 is within the specified destination port range. 
- connection_.stream_info_.downstream_address_provider_->setLocalAddress( - std::make_shared("1.2.3.4", 9999)); - - // Expect filter to try to open a connection to specified cluster. - EXPECT_CALL(factory_context_.cluster_manager_.thread_local_cluster_, tcpConnPool(_, _)) - .WillOnce(Return(absl::nullopt)); - - filter_->onNewConnection(); - - EXPECT_EQ(total_cx + 1, config_->stats().downstream_cx_total_.value()); - EXPECT_EQ(non_routable_cx, config_->stats().downstream_cx_no_route_.value()); -} - -// Test that the tcp proxy uses the cluster from FilterState if set -TEST_F(TcpProxyRoutingTest, DEPRECATED_FEATURE_TEST(UseClusterFromPerConnectionCluster)) { - TestDeprecatedV2Api _deprecated_v2_api; - setup(false); - initializeFilter(); - - factory_context_.cluster_manager_.initializeThreadLocalClusters({"filter_state_cluster"}); - connection_.streamInfo().filterState()->setData( - "envoy.tcp_proxy.cluster", std::make_unique("filter_state_cluster"), - StreamInfo::FilterState::StateType::Mutable, StreamInfo::FilterState::LifeSpan::Connection); - - // Expect filter to try to open a connection to specified cluster. 
- EXPECT_CALL(factory_context_.cluster_manager_.thread_local_cluster_, tcpConnPool(_, _)) - .WillOnce(Return(absl::nullopt)); - - filter_->onNewConnection(); -} - -// Test that the tcp proxy forwards the requested server name from FilterState if set -TEST_F(TcpProxyRoutingTest, DEPRECATED_FEATURE_TEST(UpstreamServerName)) { - TestDeprecatedV2Api _deprecated_v2_api; - setup(false); - initializeFilter(); - - connection_.streamInfo().filterState()->setData( - "envoy.network.upstream_server_name", - std::make_unique("www.example.com"), - StreamInfo::FilterState::StateType::ReadOnly, StreamInfo::FilterState::LifeSpan::Connection); - - // Expect filter to try to open a connection to a cluster with the transport socket options with - // override-server-name - EXPECT_CALL(factory_context_.cluster_manager_.thread_local_cluster_, tcpConnPool(_, _)) - .WillOnce(Invoke([](Upstream::ResourcePriority, Upstream::LoadBalancerContext* context) { - Network::TransportSocketOptionsConstSharedPtr transport_socket_options = - context->upstreamTransportSocketOptions(); - EXPECT_NE(transport_socket_options, nullptr); - EXPECT_TRUE(transport_socket_options->serverNameOverride().has_value()); - EXPECT_EQ(transport_socket_options->serverNameOverride().value(), "www.example.com"); - return absl::nullopt; - })); - - // Port 9999 is within the specified destination port range. 
- connection_.stream_info_.downstream_address_provider_->setLocalAddress( - std::make_shared("1.2.3.4", 9999)); - - filter_->onNewConnection(); -} - -// Test that the tcp proxy override ALPN from FilterState if set -TEST_F(TcpProxyRoutingTest, DEPRECATED_FEATURE_TEST(ApplicationProtocols)) { - TestDeprecatedV2Api _deprecated_v2_api; - setup(false); - initializeFilter(); - - connection_.streamInfo().filterState()->setData( - Network::ApplicationProtocols::key(), - std::make_unique(std::vector{"foo", "bar"}), - StreamInfo::FilterState::StateType::ReadOnly, StreamInfo::FilterState::LifeSpan::Connection); - - // Expect filter to try to open a connection to a cluster with the transport socket options with - // override-application-protocol - EXPECT_CALL(factory_context_.cluster_manager_.thread_local_cluster_, tcpConnPool(_, _)) - .WillOnce(Invoke([](Upstream::ResourcePriority, Upstream::LoadBalancerContext* context) { - Network::TransportSocketOptionsConstSharedPtr transport_socket_options = - context->upstreamTransportSocketOptions(); - EXPECT_NE(transport_socket_options, nullptr); - EXPECT_FALSE(transport_socket_options->applicationProtocolListOverride().empty()); - EXPECT_EQ(transport_socket_options->applicationProtocolListOverride().size(), 2); - EXPECT_EQ(transport_socket_options->applicationProtocolListOverride()[0], "foo"); - EXPECT_EQ(transport_socket_options->applicationProtocolListOverride()[1], "bar"); - return absl::nullopt; - })); - - // Port 9999 is within the specified destination port range. 
- connection_.stream_info_.downstream_address_provider_->setLocalAddress( - std::make_shared("1.2.3.4", 9999)); - - filter_->onNewConnection(); -} - -class TcpProxyNonDeprecatedConfigRoutingTest : public TcpProxyRoutingTest { -public: - void setup() { - const std::string yaml = R"EOF( - stat_prefix: name - cluster: fake_cluster - )EOF"; - - factory_context_.cluster_manager_.initializeThreadLocalClusters({"fake_cluster"}); - config_ = std::make_shared(constructConfigFromYaml(yaml, factory_context_)); - } -}; - TEST_F(TcpProxyNonDeprecatedConfigRoutingTest, ClusterNameSet) { setup(); initializeFilter(); // Port 9999 is within the specified destination port range. - connection_.stream_info_.downstream_address_provider_->setLocalAddress( + connection_.stream_info_.downstream_connection_info_provider_->setLocalAddress( std::make_shared("1.2.3.4", 9999)); // Expect filter to try to open a connection to specified cluster. @@ -1037,9 +583,9 @@ TEST_F(TcpProxyHashingTest, HashWithSourceIp) { return absl::nullopt; })); - connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("1.2.3.4", 1111)); - connection_.stream_info_.downstream_address_provider_->setLocalAddress( + connection_.stream_info_.downstream_connection_info_provider_->setLocalAddress( std::make_shared("2.3.4.5", 2222)); filter_->onNewConnection(); diff --git a/test/common/tcp_proxy/tcp_proxy_test.cc b/test/common/tcp_proxy/tcp_proxy_test.cc index 3fd3d169f2904..8ca0a1e0ae956 100644 --- a/test/common/tcp_proxy/tcp_proxy_test.cc +++ b/test/common/tcp_proxy/tcp_proxy_test.cc @@ -74,8 +74,9 @@ class TcpProxyTest : public TcpProxyTestBase { conn_pool_handles_.push_back( std::make_unique>()); ON_CALL(*upstream_hosts_.at(i), address()).WillByDefault(Return(upstream_remote_address_)); - upstream_connections_.at(i)->stream_info_.downstream_address_provider_->setLocalAddress( - upstream_local_address_); + 
upstream_connections_.at(i) + ->stream_info_.downstream_connection_info_provider_->setLocalAddress( + upstream_local_address_); EXPECT_CALL(*upstream_connections_.at(i), dispatcher()) .WillRepeatedly(ReturnRef(filter_callbacks_.connection_.dispatcher_)); } @@ -122,8 +123,8 @@ class TcpProxyTest : public TcpProxyTestBase { EXPECT_CALL(filter_callbacks_.connection_, enableHalfClose(true)); EXPECT_CALL(filter_callbacks_.connection_, readDisable(true)); filter_->initializeReadFilterCallbacks(filter_callbacks_); - filter_callbacks_.connection_.streamInfo().setDownstreamSslConnection( - filter_callbacks_.connection_.ssl()); + filter_callbacks_.connection_.stream_info_.downstream_connection_info_provider_ + ->setSslConnection(filter_callbacks_.connection_.ssl()); } if (connections > 0) { @@ -136,15 +137,8 @@ class TcpProxyTest : public TcpProxyTestBase { } }; -TEST_F(TcpProxyTest, DefaultRoutes) { - envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy config = defaultConfig(); - - envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy::WeightedCluster::ClusterWeight* - ignored_cluster = config.mutable_weighted_clusters()->mutable_clusters()->Add(); - ignored_cluster->set_name("ignored_cluster"); - ignored_cluster->set_weight(10); - - configure(config); +TEST_F(TcpProxyTest, ExplicitCluster) { + configure(defaultConfig()); NiceMock connection; EXPECT_EQ(std::string("fake_cluster"), config_->getRouteFromEntries(connection)->clusterName()); @@ -225,7 +219,7 @@ TEST_F(TcpProxyTest, BadFactory) { EXPECT_CALL(filter_callbacks_.connection_, enableHalfClose(true)); EXPECT_CALL(filter_callbacks_.connection_, readDisable(true)); filter_->initializeReadFilterCallbacks(filter_callbacks_); - filter_callbacks_.connection_.streamInfo().setDownstreamSslConnection( + filter_callbacks_.connection_.stream_info_.downstream_connection_info_provider_->setSslConnection( filter_callbacks_.connection_.ssl()); EXPECT_EQ(Network::FilterStatus::StopIteration, 
filter_->onNewConnection()); } @@ -279,6 +273,7 @@ TEST_F(TcpProxyTest, ConnectAttemptsUpstreamLocalFail) { EXPECT_EQ(0U, factory_context_.cluster_manager_.thread_local_cluster_.cluster_.info_->stats_store_ .counter("upstream_cx_connect_attempts_exceeded") .value()); + EXPECT_EQ(2U, filter_->getStreamInfo().attemptCount().value()); } // Make sure that the tcp proxy code handles reentrant calls to onPoolFailure. @@ -881,9 +876,9 @@ TEST_F(TcpProxyTest, AccessLogUpstreamLocalAddress) { // Test that access log fields %DOWNSTREAM_PEER_URI_SAN% is correctly logged. TEST_F(TcpProxyTest, AccessLogPeerUriSan) { - filter_callbacks_.connection_.stream_info_.downstream_address_provider_->setLocalAddress( + filter_callbacks_.connection_.stream_info_.downstream_connection_info_provider_->setLocalAddress( Network::Utility::resolveUrl("tcp://1.1.1.2:20000")); - filter_callbacks_.connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + filter_callbacks_.connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( Network::Utility::resolveUrl("tcp://1.1.1.1:40000")); const std::vector uriSan{"someSan"}; @@ -899,9 +894,9 @@ TEST_F(TcpProxyTest, AccessLogPeerUriSan) { // Test that access log fields %DOWNSTREAM_TLS_SESSION_ID% is correctly logged. 
TEST_F(TcpProxyTest, AccessLogTlsSessionId) { - filter_callbacks_.connection_.stream_info_.downstream_address_provider_->setLocalAddress( + filter_callbacks_.connection_.stream_info_.downstream_connection_info_provider_->setLocalAddress( Network::Utility::resolveUrl("tcp://1.1.1.2:20000")); - filter_callbacks_.connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + filter_callbacks_.connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( Network::Utility::resolveUrl("tcp://1.1.1.1:40000")); const std::string tlsSessionId{ @@ -919,9 +914,9 @@ TEST_F(TcpProxyTest, AccessLogTlsSessionId) { // Test that access log fields %DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% and // %DOWNSTREAM_LOCAL_ADDRESS% are correctly logged. TEST_F(TcpProxyTest, AccessLogDownstreamAddress) { - filter_callbacks_.connection_.stream_info_.downstream_address_provider_->setLocalAddress( + filter_callbacks_.connection_.stream_info_.downstream_connection_info_provider_->setLocalAddress( Network::Utility::resolveUrl("tcp://1.1.1.2:20000")); - filter_callbacks_.connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + filter_callbacks_.connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( Network::Utility::resolveUrl("tcp://1.1.1.1:40000")); setup(1, accessLogConfig("%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% %DOWNSTREAM_LOCAL_ADDRESS%")); filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); @@ -936,7 +931,7 @@ TEST_F(TcpProxyTest, AccessLogUpstreamSSLConnection) { const std::string session_id = "D62A523A65695219D46FE1FFE285A4C371425ACE421B110B5B8D11D3EB4D5F0B"; auto ssl_info = std::make_shared(); EXPECT_CALL(*ssl_info, sessionId()).WillRepeatedly(ReturnRef(session_id)); - stream_info.setDownstreamSslConnection(ssl_info); + stream_info.downstream_connection_info_provider_->setSslConnection(ssl_info); EXPECT_CALL(*upstream_connections_.at(0), 
streamInfo()).WillRepeatedly(ReturnRef(stream_info)); raiseEventUpstreamConnected(0); @@ -1096,12 +1091,12 @@ TEST_F(TcpProxyTest, AccessDownstreamAndUpstreamProperties) { setup(1); raiseEventUpstreamConnected(0); - EXPECT_EQ(filter_callbacks_.connection().streamInfo().downstreamSslConnection(), + EXPECT_EQ(filter_callbacks_.connection().streamInfo().downstreamAddressProvider().sslConnection(), filter_callbacks_.connection().ssl()); EXPECT_EQ(filter_callbacks_.connection().streamInfo().upstreamLocalAddress(), upstream_connections_.at(0)->streamInfo().downstreamAddressProvider().localAddress()); EXPECT_EQ(filter_callbacks_.connection().streamInfo().upstreamSslConnection(), - upstream_connections_.at(0)->streamInfo().downstreamSslConnection()); + upstream_connections_.at(0)->streamInfo().downstreamAddressProvider().sslConnection()); } } // namespace } // namespace TcpProxy diff --git a/test/common/tcp_proxy/tcp_proxy_test_base.h b/test/common/tcp_proxy/tcp_proxy_test_base.h index 37676f5bbceda..b6b437b2f38ee 100644 --- a/test/common/tcp_proxy/tcp_proxy_test_base.h +++ b/test/common/tcp_proxy/tcp_proxy_test_base.h @@ -51,18 +51,9 @@ using ::testing::SaveArg; } // namespace inline Config constructConfigFromYaml(const std::string& yaml, - Server::Configuration::FactoryContext& context, - bool avoid_boosting = true) { + Server::Configuration::FactoryContext& context) { envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy tcp_proxy; - TestUtility::loadFromYamlAndValidate(yaml, tcp_proxy, false, avoid_boosting); - return Config(tcp_proxy, context); -} - -inline Config constructConfigFromV3Yaml(const std::string& yaml, - Server::Configuration::FactoryContext& context, - bool avoid_boosting = true) { - envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy tcp_proxy; - TestUtility::loadFromYamlAndValidate(yaml, tcp_proxy, false, avoid_boosting); + TestUtility::loadFromYamlAndValidate(yaml, tcp_proxy); return Config(tcp_proxy, context); } @@ -98,8 +89,7 @@ class 
TcpProxyTestBase : public testing::Test { envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy defaultConfig() { envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy config; config.set_stat_prefix("name"); - auto* route = config.mutable_hidden_envoy_deprecated_deprecated_v1()->mutable_routes()->Add(); - route->set_cluster("fake_cluster"); + config.set_cluster("fake_cluster"); return config; } diff --git a/test/common/tracing/http_tracer_impl_test.cc b/test/common/tracing/http_tracer_impl_test.cc index 158e39f9073e6..1b263c8dfe661 100644 --- a/test/common/tracing/http_tracer_impl_test.cc +++ b/test/common/tracing/http_tracer_impl_test.cc @@ -141,7 +141,7 @@ TEST_F(HttpConnManFinalizerImplTest, OriginalAndLongPath) { EXPECT_CALL(stream_info, protocol()).WillRepeatedly(ReturnPointee(&protocol)); absl::optional response_code; EXPECT_CALL(stream_info, responseCode()).WillRepeatedly(ReturnPointee(&response_code)); - stream_info.downstream_address_provider_->setDirectRemoteAddressForTest(remote_address); + stream_info.downstream_connection_info_provider_->setDirectRemoteAddressForTest(remote_address); EXPECT_CALL(span, setTag(_, _)).Times(testing::AnyNumber()); EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpUrl), Eq(path_prefix + expected_path))); @@ -172,7 +172,7 @@ TEST_F(HttpConnManFinalizerImplTest, NoGeneratedId) { EXPECT_CALL(stream_info, protocol()).WillRepeatedly(ReturnPointee(&protocol)); absl::optional response_code; EXPECT_CALL(stream_info, responseCode()).WillRepeatedly(ReturnPointee(&response_code)); - stream_info.downstream_address_provider_->setDirectRemoteAddressForTest(remote_address); + stream_info.downstream_connection_info_provider_->setDirectRemoteAddressForTest(remote_address); EXPECT_CALL(span, setTag(_, _)).Times(testing::AnyNumber()); EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpUrl), Eq(path_prefix + expected_path))); @@ -202,7 +202,7 @@ TEST_F(HttpConnManFinalizerImplTest, Connect) { EXPECT_CALL(stream_info, 
protocol()).WillRepeatedly(ReturnPointee(&protocol)); absl::optional response_code; EXPECT_CALL(stream_info, responseCode()).WillRepeatedly(ReturnPointee(&response_code)); - stream_info.downstream_address_provider_->setDirectRemoteAddressForTest(remote_address); + stream_info.downstream_connection_info_provider_->setDirectRemoteAddressForTest(remote_address); EXPECT_CALL(span, setTag(_, _)).Times(testing::AnyNumber()); EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpUrl), Eq(""))); @@ -220,7 +220,7 @@ TEST_F(HttpConnManFinalizerImplTest, NullRequestHeadersAndNullRouteEntry) { absl::optional response_code; EXPECT_CALL(stream_info, responseCode()).WillRepeatedly(ReturnPointee(&response_code)); EXPECT_CALL(stream_info, upstreamHost()).WillRepeatedly(Return(nullptr)); - EXPECT_CALL(stream_info, routeEntry()).WillRepeatedly(Return(nullptr)); + EXPECT_CALL(stream_info, route()).WillRepeatedly(Return(nullptr)); EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().HttpStatusCode), Eq("0"))); EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True))); @@ -326,7 +326,7 @@ TEST_F(HttpConnManFinalizerImplTest, SpanOptionalHeaders) { absl::optional protocol = Http::Protocol::Http10; EXPECT_CALL(stream_info, bytesReceived()).WillOnce(Return(10)); EXPECT_CALL(stream_info, protocol()).WillRepeatedly(ReturnPointee(&protocol)); - stream_info.downstream_address_provider_->setDirectRemoteAddressForTest(remote_address); + stream_info.downstream_connection_info_provider_->setDirectRemoteAddressForTest(remote_address); // Check that span is populated correctly. 
EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().GuidXRequestId), Eq("id"))); @@ -364,7 +364,7 @@ TEST_F(HttpConnManFinalizerImplTest, UnixDomainSocketPeerAddressTag) { const std::string path_{TestEnvironment::unixDomainSocketPath("foo")}; const auto remote_address = Network::Utility::resolveUrl("unix://" + path_); - stream_info.downstream_address_provider_->setDirectRemoteAddressForTest(remote_address); + stream_info.downstream_connection_info_provider_->setDirectRemoteAddressForTest(remote_address); // Check that the PeerAddress is populated correctly for Unix domain sockets. EXPECT_CALL(span, setTag(_, _)).Times(AnyNumber()); @@ -396,9 +396,9 @@ TEST_F(HttpConnManFinalizerImplTest, SpanCustomTags) { emp: "")EOF"; TestUtility::loadFromYaml(yaml, fake_struct); (*stream_info.metadata_.mutable_filter_metadata())["m.req"].MergeFrom(fake_struct); - NiceMock route_entry; - EXPECT_CALL(stream_info, routeEntry()).WillRepeatedly(Return(&route_entry)); - (*route_entry.metadata_.mutable_filter_metadata())["m.rot"].MergeFrom(fake_struct); + std::shared_ptr route{new NiceMock()}; + EXPECT_CALL(stream_info, route()).WillRepeatedly(Return(route)); + (*route->metadata_.mutable_filter_metadata())["m.rot"].MergeFrom(fake_struct); std::shared_ptr host_metadata = std::make_shared(); (*host_metadata->mutable_filter_metadata())["m.host"].MergeFrom(fake_struct); @@ -511,7 +511,7 @@ TEST_F(HttpConnManFinalizerImplTest, SpanPopulatedFailureResponse) { absl::optional protocol = Http::Protocol::Http10; EXPECT_CALL(stream_info, protocol()).WillRepeatedly(ReturnPointee(&protocol)); EXPECT_CALL(stream_info, bytesReceived()).WillOnce(Return(10)); - stream_info.downstream_address_provider_->setDirectRemoteAddressForTest(remote_address); + stream_info.downstream_connection_info_provider_->setDirectRemoteAddressForTest(remote_address); // Check that span is populated correctly. 
EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().GuidXRequestId), Eq("id"))); @@ -570,7 +570,7 @@ TEST_F(HttpConnManFinalizerImplTest, GrpcOkStatus) { EXPECT_CALL(stream_info, bytesReceived()).WillOnce(Return(10)); EXPECT_CALL(stream_info, bytesSent()).WillOnce(Return(11)); EXPECT_CALL(stream_info, protocol()).WillRepeatedly(ReturnPointee(&protocol)); - stream_info.downstream_address_provider_->setDirectRemoteAddressForTest(remote_address); + stream_info.downstream_connection_info_provider_->setDirectRemoteAddressForTest(remote_address); EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().Component), Eq(Tracing::Tags::get().Proxy))); EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().DownstreamCluster), Eq("-"))); @@ -622,7 +622,7 @@ TEST_F(HttpConnManFinalizerImplTest, GrpcErrorTag) { EXPECT_CALL(stream_info, bytesReceived()).WillOnce(Return(10)); EXPECT_CALL(stream_info, bytesSent()).WillOnce(Return(11)); EXPECT_CALL(stream_info, protocol()).WillRepeatedly(ReturnPointee(&protocol)); - stream_info.downstream_address_provider_->setDirectRemoteAddressForTest(remote_address); + stream_info.downstream_connection_info_provider_->setDirectRemoteAddressForTest(remote_address); EXPECT_CALL(span, setTag(_, _)).Times(testing::AnyNumber()); EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True))); @@ -667,7 +667,7 @@ TEST_F(HttpConnManFinalizerImplTest, GrpcTrailersOnly) { EXPECT_CALL(stream_info, bytesReceived()).WillOnce(Return(10)); EXPECT_CALL(stream_info, bytesSent()).WillOnce(Return(11)); EXPECT_CALL(stream_info, protocol()).WillRepeatedly(ReturnPointee(&protocol)); - stream_info.downstream_address_provider_->setDirectRemoteAddressForTest(remote_address); + stream_info.downstream_connection_info_provider_->setDirectRemoteAddressForTest(remote_address); EXPECT_CALL(span, setTag(_, _)).Times(testing::AnyNumber()); EXPECT_CALL(span, setTag(Eq(Tracing::Tags::get().Error), Eq(Tracing::Tags::get().True))); diff --git a/test/common/upstream/BUILD 
b/test/common/upstream/BUILD index 84c54f4dfd2eb..a0a36ca636bae 100644 --- a/test/common/upstream/BUILD +++ b/test/common/upstream/BUILD @@ -41,7 +41,10 @@ envoy_cc_test( deps = [ ":test_cluster_manager", "//source/common/router:context_lib", + "//source/common/upstream:load_balancer_factory_base_lib", "//source/extensions/transport_sockets/tls:config", + "//test/config:v2_link_hacks", + "//test/integration/load_balancers:custom_lb_policy", "//test/mocks/matcher:matcher_mocks", "//test/mocks/upstream:cds_api_mocks", "//test/mocks/upstream:cluster_priority_set_mocks", @@ -82,10 +85,19 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "wrsq_scheduler_test", + srcs = ["wrsq_scheduler_test.cc"], + deps = [ + "//source/common/upstream:scheduler_lib", + "//test/mocks:common_lib", + ], +) + envoy_cc_test( name = "edf_scheduler_test", srcs = ["edf_scheduler_test.cc"], - deps = ["//source/common/upstream:edf_scheduler_lib"], + deps = ["//source/common/upstream:scheduler_lib"], ) envoy_cc_test( @@ -99,6 +111,7 @@ envoy_cc_test( "//source/extensions/transport_sockets/tls:config", "//source/server:transport_socket_config_lib", "//test/common/stats:stat_test_utility_lib", + "//test/integration/load_balancers:custom_lb_policy", "//test/mocks/local_info:local_info_mocks", "//test/mocks/protobuf:protobuf_mocks", "//test/mocks/runtime:runtime_mocks", @@ -152,6 +165,28 @@ envoy_benchmark_test( benchmark_binary = "eds_speed_test", ) +envoy_cc_test( + name = "leds_test", + srcs = ["leds_test.cc"], + deps = [ + ":utility_lib", + "//source/common/config:utility_lib", + "//source/common/upstream:leds_lib", + "//source/extensions/transport_sockets/raw_buffer:config", + "//source/server:transport_socket_config_lib", + "//test/common/stats:stat_test_utility_lib", + "//test/mocks/local_info:local_info_mocks", + "//test/mocks/protobuf:protobuf_mocks", + "//test/mocks/runtime:runtime_mocks", + "//test/mocks/server:admin_mocks", + "//test/mocks/server:instance_mocks", + 
"//test/mocks/ssl:ssl_mocks", + "//test/mocks/upstream:cluster_manager_mocks", + "//test/test_common:utility_lib", + "@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto", + ], +) + envoy_cc_test_library( name = "health_check_fuzz_utils_lib", srcs = [ @@ -292,7 +327,7 @@ envoy_proto_library( envoy_cc_fuzz_test( name = "random_load_balancer_fuzz_test", srcs = ["random_load_balancer_fuzz_test.cc"], - corpus = "//test/common/upstream:random_load_balancer_corpus", + corpus = "random_load_balancer_corpus", deps = [ ":load_balancer_fuzz_lib", ":load_balancer_fuzz_proto_cc_proto", @@ -792,7 +827,7 @@ envoy_proto_library( envoy_cc_fuzz_test( name = "round_robin_load_balancer_fuzz_test", srcs = ["round_robin_load_balancer_fuzz_test.cc"], - corpus = "//test/common/upstream:round_robin_load_balancer_corpus", + corpus = "round_robin_load_balancer_corpus", deps = [ ":round_robin_load_balancer_fuzz_proto_cc_proto", ":utility_lib", @@ -813,10 +848,22 @@ envoy_proto_library( envoy_cc_fuzz_test( name = "least_request_load_balancer_fuzz_test", srcs = ["least_request_load_balancer_fuzz_test.cc"], - corpus = "//test/common/upstream:least_request_load_balancer_corpus", + corpus = "least_request_load_balancer_corpus", deps = [ ":least_request_load_balancer_fuzz_proto_cc_proto", ":utility_lib", ":zone_aware_load_balancer_fuzz_lib", ], ) + +envoy_cc_benchmark_binary( + name = "scheduler_benchmark", + srcs = ["scheduler_benchmark.cc"], + external_deps = [ + "benchmark", + ], + deps = [ + "//source/common/common:random_generator_lib", + "//source/common/upstream:scheduler_lib", + ], +) diff --git a/test/common/upstream/cds_api_impl_test.cc b/test/common/upstream/cds_api_impl_test.cc index 33eaee18fa92d..3e39017ec2672 100644 --- a/test/common/upstream/cds_api_impl_test.cc +++ b/test/common/upstream/cds_api_impl_test.cc @@ -208,11 +208,11 @@ TEST_F(CdsApiImplTest, DeltaConfigUpdate) { { envoy::config::cluster::v3::Cluster cluster; cluster.set_name("cluster_3"); - expectAdd("cluster_3", "v2"); + 
expectAdd("cluster_3", "v3"); auto* resource = resources.Add(); resource->mutable_resource()->PackFrom(cluster); resource->set_name("cluster_3"); - resource->set_version("v2"); + resource->set_version("v3"); } Protobuf::RepeatedPtrField removed; *removed.Add() = "cluster_1"; diff --git a/test/common/upstream/cluster_manager_impl_test.cc b/test/common/upstream/cluster_manager_impl_test.cc index a7ace7425ba85..b8f8003ce34f7 100644 --- a/test/common/upstream/cluster_manager_impl_test.cc +++ b/test/common/upstream/cluster_manager_impl_test.cc @@ -7,9 +7,11 @@ #include "source/common/network/raw_buffer_socket.h" #include "source/common/network/resolver_impl.h" #include "source/common/router/context_impl.h" +#include "source/common/upstream/load_balancer_factory_base.h" #include "source/extensions/transport_sockets/raw_buffer/config.h" #include "test/common/upstream/test_cluster_manager.h" +#include "test/config/v2_link_hacks.h" #include "test/mocks/http/conn_pool.h" #include "test/mocks/matcher/mocks.h" #include "test/mocks/upstream/cds_api.h" @@ -57,10 +59,9 @@ using ::testing::SaveArg; using namespace std::chrono_literals; -envoy::config::bootstrap::v3::Bootstrap parseBootstrapFromV3Yaml(const std::string& yaml, - bool avoid_boosting = true) { +envoy::config::bootstrap::v3::Bootstrap parseBootstrapFromV3Yaml(const std::string& yaml) { envoy::config::bootstrap::v3::Bootstrap bootstrap; - TestUtility::loadFromYaml(yaml, bootstrap, true, avoid_boosting); + TestUtility::loadFromYaml(yaml, bootstrap); return bootstrap; } @@ -478,6 +479,7 @@ TEST_F(ClusterManagerImplTest, PrimaryClusters) { type: eds eds_cluster_config: eds_config: + resource_api_version: V3 api_config_source: api_type: GRPC transport_api_version: V3 @@ -523,8 +525,7 @@ class ClusterManagerSubsetInitializationTest if (envoy::config::cluster::v3::Cluster::LbPolicy_IsValid(i)) { auto policy = static_cast(i); if (policy != - envoy::config::cluster::v3::Cluster::hidden_envoy_deprecated_ORIGINAL_DST_LB && - 
policy != envoy::config::cluster::v3::Cluster::LOAD_BALANCING_POLICY_CONFIG) { + envoy::config::cluster::v3::Cluster::hidden_envoy_deprecated_ORIGINAL_DST_LB) { policies.push_back(policy); } } @@ -577,7 +578,8 @@ TEST_P(ClusterManagerSubsetInitializationTest, SubsetLoadBalancerInitialization) } const std::string yaml = fmt::format(yamlPattern, cluster_type, policy_name); - if (GetParam() == envoy::config::cluster::v3::Cluster::CLUSTER_PROVIDED) { + if (GetParam() == envoy::config::cluster::v3::Cluster::CLUSTER_PROVIDED || + GetParam() == envoy::config::cluster::v3::Cluster::LOAD_BALANCING_POLICY_CONFIG) { EXPECT_THROW_WITH_MESSAGE( create(parseBootstrapFromV3Yaml(yaml)), EnvoyException, fmt::format("cluster: LB policy {} cannot be combined with lb_subset_config", @@ -755,6 +757,152 @@ TEST_F(ClusterManagerImplTest, ClusterProvidedLbNotConfigured) { "'cluster_0' provided one. Check cluster documentation."); } +// Verify that specifying LOAD_BALANCING_POLICY_CONFIG with CommonLbConfig is an error. +TEST_F(ClusterManagerImplTest, LbPolicyConfigCannotSpecifyCommonLbConfig) { + // envoy.load_balancers.custom_lb is registered by linking in + // //test/integration/load_balancers:custom_lb_policy. 
+ const std::string yaml = fmt::format(R"EOF( + static_resources: + clusters: + - name: cluster_1 + connect_timeout: 0.250s + type: STATIC + lb_policy: LOAD_BALANCING_POLICY_CONFIG + load_balancing_policy: + policies: + - typed_extension_config: + name: envoy.load_balancers.custom_lb + common_lb_config: + update_merge_window: 3s + load_assignment: + cluster_name: cluster_1 + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8000 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8001 + )EOF"); + + EXPECT_THROW_WITH_MESSAGE( + create(parseBootstrapFromV3Yaml(yaml)), EnvoyException, + "cluster: LB policy LOAD_BALANCING_POLICY_CONFIG cannot be combined with common_lb_config"); +} + +// Verify that LOAD_BALANCING_POLICY_CONFIG without specifying load balancing policy is an error. +TEST_F(ClusterManagerImplTest, LbPolicyConfigMustSpecifyLbPolicy) { + const std::string yaml = fmt::format(R"EOF( + static_resources: + clusters: + - name: cluster_1 + connect_timeout: 0.250s + type: STATIC + lb_policy: LOAD_BALANCING_POLICY_CONFIG + load_assignment: + cluster_name: cluster_1 + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8000 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8001 + )EOF"); + + EXPECT_THROW_WITH_MESSAGE( + create(parseBootstrapFromV3Yaml(yaml)), EnvoyException, + "cluster: LB policy LOAD_BALANCING_POLICY_CONFIG requires load_balancing_policy to be set"); +} + +// Verify that multiple load balancing policies can be specified, and Envoy selects the first +// policy that it has a factory for. +TEST_F(ClusterManagerImplTest, LbPolicyConfig) { + // envoy.load_balancers.custom_lb is registered by linking in + // //test/integration/load_balancers:custom_lb_policy. 
+ const std::string yaml = fmt::format(R"EOF( + static_resources: + clusters: + - name: cluster_1 + connect_timeout: 0.250s + type: STATIC + lb_policy: LOAD_BALANCING_POLICY_CONFIG + load_balancing_policy: + policies: + - typed_extension_config: + name: envoy.load_balancers.unknown_lb + - typed_extension_config: + name: envoy.load_balancers.custom_lb + load_assignment: + cluster_name: cluster_1 + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8000 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8001 + )EOF"); + + create(parseBootstrapFromV3Yaml(yaml)); + const auto& cluster = cluster_manager_->clusters().getCluster("cluster_1"); + EXPECT_NE(cluster, absl::nullopt); + EXPECT_EQ(cluster->get().info()->loadBalancingPolicy().typed_extension_config().name(), + "envoy.load_balancers.custom_lb"); +} + +// Verify that if Envoy does not have a factory for any of the load balancing policies specified in +// the load balancing policy config, it is an error. 
+TEST_F(ClusterManagerImplTest, LbPolicyConfigThrowsExceptionIfNoLbPoliciesFound) { + const std::string yaml = fmt::format(R"EOF( + static_resources: + clusters: + - name: cluster_1 + connect_timeout: 0.250s + type: STATIC + lb_policy: LOAD_BALANCING_POLICY_CONFIG + load_balancing_policy: + policies: + - typed_extension_config: + name: envoy.load_balancers.unknown_lb_1 + - typed_extension_config: + name: envoy.load_balancers.unknown_lb_2 + load_assignment: + cluster_name: cluster_1 + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8000 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 8001 + )EOF"); + + EXPECT_THROW_WITH_MESSAGE( + create(parseBootstrapFromV3Yaml(yaml)), EnvoyException, + "Didn't find a registered load balancer factory implementation for cluster: 'cluster_1'"); +} + class ClusterManagerImplThreadAwareLbTest : public ClusterManagerImplTest { public: void doTest(LoadBalancerType lb_type) { @@ -1594,15 +1742,17 @@ TEST_F(ClusterManagerImplTest, DynamicAddRemove) { EXPECT_EQ(cluster2->info_, cluster_manager_->getThreadLocalCluster("fake_cluster")->info()); EXPECT_EQ(1UL, cluster_manager_->clusters().active_clusters_.size()); Http::ConnectionPool::MockInstance* cp = new Http::ConnectionPool::MockInstance(); + Http::ConnectionPool::Instance::IdleCb idle_cb; EXPECT_CALL(factory_, allocateConnPool_(_, _, _, _, _)).WillOnce(Return(cp)); - EXPECT_CALL(*cp, addIdleCallback(_)); + EXPECT_CALL(*cp, addIdleCallback(_)).WillOnce(SaveArg<0>(&idle_cb)); EXPECT_EQ(cp, HttpPoolDataPeer::getPool(cluster_manager_->getThreadLocalCluster("fake_cluster") ->httpConnPool(ResourcePriority::Default, Http::Protocol::Http11, nullptr))); Tcp::ConnectionPool::MockInstance* cp2 = new Tcp::ConnectionPool::MockInstance(); + Tcp::ConnectionPool::Instance::IdleCb idle_cb2; EXPECT_CALL(factory_, allocateTcpConnPool_(_)).WillOnce(Return(cp2)); - EXPECT_CALL(*cp2, addIdleCallback(_)); + 
EXPECT_CALL(*cp2, addIdleCallback(_)).WillOnce(SaveArg<0>(&idle_cb2)); EXPECT_EQ(cp2, TcpPoolDataPeer::getPool(cluster_manager_->getThreadLocalCluster("fake_cluster") ->tcpConnPool(ResourcePriority::Default, nullptr))); @@ -1633,6 +1783,9 @@ TEST_F(ClusterManagerImplTest, DynamicAddRemove) { // Remove an unknown cluster. EXPECT_FALSE(cluster_manager_->removeCluster("foo")); + idle_cb(); + idle_cb2(); + checkStats(1 /*added*/, 1 /*modified*/, 1 /*removed*/, 0 /*active*/, 0 /*warming*/); EXPECT_TRUE(Mock::VerifyAndClearExpectations(cluster1.get())); @@ -3441,6 +3594,80 @@ TEST_F(ClusterManagerImplTest, HttpPoolDataForwardsCallsToConnectionPool) { opt_cp.value().addIdleCallback(drained_cb); } +// Test that the read only cross-priority host map in the main thread is correctly synchronized to +// the worker thread when the cluster's host set is updated. +TEST_F(ClusterManagerImplTest, CrossPriorityHostMapSyncTest) { + std::string yaml = R"EOF( + static_resources: + clusters: + - name: cluster_1 + connect_timeout: 0.250s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: cluster_1 + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11001 + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11002 + common_lb_config: + update_merge_window: 0s + )EOF"; + create(parseBootstrapFromV3Yaml(yaml)); + + Cluster& cluster = cluster_manager_->activeClusters().begin()->second; + EXPECT_EQ(2, cluster.prioritySet().crossPriorityHostMap()->size()); + EXPECT_EQ( + cluster_manager_->getThreadLocalCluster("cluster_1")->prioritySet().crossPriorityHostMap(), + cluster.prioritySet().crossPriorityHostMap()); + + HostVectorSharedPtr hosts( + new HostVector(cluster.prioritySet().hostSetsPerPriority()[0]->hosts())); + HostsPerLocalitySharedPtr hosts_per_locality = std::make_shared(); + HostVector hosts_added; + HostVector hosts_removed; + + hosts_removed.push_back((*hosts)[0]); + 
cluster.prioritySet().updateHosts( + 0, + updateHostsParams(hosts, hosts_per_locality, + std::make_shared(*hosts), hosts_per_locality), + {}, hosts_added, hosts_removed, absl::nullopt); + + EXPECT_EQ(1, factory_.stats_.counter("cluster_manager.cluster_updated").value()); + EXPECT_EQ(0, factory_.stats_.counter("cluster_manager.cluster_updated_via_merge").value()); + EXPECT_EQ(0, factory_.stats_.counter("cluster_manager.update_merge_cancelled").value()); + + EXPECT_EQ(1, cluster.prioritySet().crossPriorityHostMap()->size()); + EXPECT_EQ( + cluster_manager_->getThreadLocalCluster("cluster_1")->prioritySet().crossPriorityHostMap(), + cluster.prioritySet().crossPriorityHostMap()); + + hosts_added.push_back((*hosts)[0]); + hosts_removed.clear(); + cluster.prioritySet().updateHosts( + 0, + updateHostsParams(hosts, hosts_per_locality, + std::make_shared(*hosts), hosts_per_locality), + {}, hosts_added, hosts_removed, absl::nullopt); + EXPECT_EQ(2, factory_.stats_.counter("cluster_manager.cluster_updated").value()); + EXPECT_EQ(0, factory_.stats_.counter("cluster_manager.cluster_updated_via_merge").value()); + EXPECT_EQ(0, factory_.stats_.counter("cluster_manager.update_merge_cancelled").value()); + + EXPECT_EQ(2, cluster.prioritySet().crossPriorityHostMap()->size()); + EXPECT_EQ( + cluster_manager_->getThreadLocalCluster("cluster_1")->prioritySet().crossPriorityHostMap(), + cluster.prioritySet().crossPriorityHostMap()); +} + class TestUpstreamNetworkFilter : public Network::WriteFilter { public: Network::FilterStatus onWrite(Buffer::Instance&, bool) override { diff --git a/test/common/upstream/eds_speed_test.cc b/test/common/upstream/eds_speed_test.cc index e6da3c12a1ec2..0eb5fae8bd607 100644 --- a/test/common/upstream/eds_speed_test.cc +++ b/test/common/upstream/eds_speed_test.cc @@ -39,18 +39,16 @@ namespace Upstream { class EdsSpeedTest { public: - EdsSpeedTest(State& state, bool v2_config) - : state_(state), v2_config_(v2_config), - type_url_(v2_config_ - ? 
"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment" - : "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment"), + EdsSpeedTest(State& state) + : state_(state), + type_url_("type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment"), subscription_stats_(Config::Utility::generateStats(stats_)), api_(Api::createApiForTest(stats_)), async_client_(new Grpc::MockAsyncClient()), grpc_mux_(new Config::GrpcMuxImpl( local_info_, std::unique_ptr(async_client_), dispatcher_, *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( "envoy.service.endpoint.v3.EndpointDiscoveryService.StreamEndpoints"), - envoy::config::core::v3::ApiVersion::AUTO, random_, stats_, {}, true)) { + random_, stats_, {}, true)) { resetCluster(R"EOF( name: name connect_timeout: 0.25s @@ -129,12 +127,6 @@ class EdsSpeedTest { response->set_version_info(fmt::format("version-{}", version_++)); auto* resource = response->mutable_resources()->Add(); resource->PackFrom(cluster_load_assignment); - if (v2_config_) { - RELEASE_ASSERT(resource->type_url() == - "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", - ""); - resource->set_type_url("type.googleapis.com/envoy.api.v2.ClusterLoadAssignment"); - } state_.ResumeTiming(); grpc_mux_->grpcStreamForTest().onReceiveMessage(std::move(response)); ASSERT(cluster_->prioritySet().hostSetsPerPriority()[1]->hostsPerLocality().get()[0].size() == @@ -143,7 +135,6 @@ class EdsSpeedTest { TestDeprecatedV2Api _deprecated_v2_api_; State& state_; - const bool v2_config_; const std::string type_url_; uint64_t version_{}; bool initialized_{}; @@ -180,16 +171,16 @@ static void priorityAndLocalityWeighted(State& state) { Envoy::Logger::Context logging_state(spdlog::level::warn, Envoy::Logger::Logger::DEFAULT_LOG_FORMAT, lock, false); for (auto _ : state) { - Envoy::Upstream::EdsSpeedTest speed_test(state, state.range(0)); + Envoy::Upstream::EdsSpeedTest speed_test(state); // if we've been instructed to skip tests, only run 
once no matter the argument: - uint32_t endpoints = skipExpensiveBenchmarks() ? 1 : state.range(2); + uint32_t endpoints = skipExpensiveBenchmarks() ? 1 : state.range(1); - speed_test.priorityAndLocalityWeightedHelper(state.range(1), endpoints, true); + speed_test.priorityAndLocalityWeightedHelper(state.range(0), endpoints, true); } } BENCHMARK(priorityAndLocalityWeighted) - ->Ranges({{false, true}, {false, true}, {1, 100000}}) + ->Ranges({{false, true}, {1, 100000}}) ->Unit(benchmark::kMillisecond); static void duplicateUpdate(State& state) { @@ -198,7 +189,7 @@ static void duplicateUpdate(State& state) { Envoy::Logger::Logger::DEFAULT_LOG_FORMAT, lock, false); for (auto _ : state) { - Envoy::Upstream::EdsSpeedTest speed_test(state, false); + Envoy::Upstream::EdsSpeedTest speed_test(state); uint32_t endpoints = skipExpensiveBenchmarks() ? 1 : state.range(0); speed_test.priorityAndLocalityWeightedHelper(true, endpoints, true); @@ -213,7 +204,7 @@ static void healthOnlyUpdate(State& state) { Envoy::Logger::Context logging_state(spdlog::level::warn, Envoy::Logger::Logger::DEFAULT_LOG_FORMAT, lock, false); for (auto _ : state) { - Envoy::Upstream::EdsSpeedTest speed_test(state, false); + Envoy::Upstream::EdsSpeedTest speed_test(state); uint32_t endpoints = skipExpensiveBenchmarks() ? 1 : state.range(0); speed_test.priorityAndLocalityWeightedHelper(true, endpoints, true); diff --git a/test/common/upstream/eds_test.cc b/test/common/upstream/eds_test.cc index ad84f1678ddfa..a3d987500aeb0 100644 --- a/test/common/upstream/eds_test.cc +++ b/test/common/upstream/eds_test.cc @@ -472,14 +472,14 @@ TEST_F(EdsTest, EndpointMetadata) { // New resources with Metadata updated. 
Config::Metadata::mutableMetadataValue(*canary->mutable_metadata(), Config::MetadataFilters::get().ENVOY_LB, "version") - .set_string_value("v2"); + .set_string_value("v3"); doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); auto& nhosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts(); EXPECT_EQ(nhosts.size(), 2); EXPECT_EQ(Config::Metadata::metadataValue(nhosts[1]->metadata().get(), Config::MetadataFilters::get().ENVOY_LB, "version") .string_value(), - "v2"); + "v3"); } // Test verifies that updating metadata updates @@ -534,7 +534,7 @@ TEST_F(EdsTest, EndpointHealthStatus) { auto* endpoints = cluster_load_assignment.add_endpoints(); // First check that EDS is correctly mapping - // envoy::api::v2::core::HealthStatus values to the expected health() status. + // HealthStatus values to the expected health() status. const std::vector> health_status_expected = { {envoy::config::core::v3::UNKNOWN, Host::Health::Healthy}, @@ -1475,7 +1475,7 @@ TEST_F(EdsTest, EndpointLocality) { } // Validate that onConfigUpdate() does not propagate locality weights to the host set when -// locality weighted balancing isn't configured. +// locality weighted balancing isn't configured and the cluster does not use LB policy extensions. 
TEST_F(EdsTest, EndpointLocalityWeightsIgnored) { envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment; cluster_load_assignment.set_cluster_name("fare"); @@ -1503,12 +1503,77 @@ TEST_F(EdsTest, EndpointLocalityWeightsIgnored) { EXPECT_EQ(nullptr, cluster_->prioritySet().hostSetsPerPriority()[0]->localityWeights()); } +class EdsLocalityWeightsTest : public EdsTest { +public: + void expectLocalityWeightsPresentForClusterConfig(const std::string& config) { + envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment; + cluster_load_assignment.set_cluster_name("fare"); + resetCluster(config, Cluster::InitializePhase::Secondary); + + { + auto* endpoints = cluster_load_assignment.add_endpoints(); + auto* locality = endpoints->mutable_locality(); + locality->set_region("oceania"); + locality->set_zone("hello"); + locality->set_sub_zone("world"); + endpoints->mutable_load_balancing_weight()->set_value(42); + + auto* endpoint_address = endpoints->add_lb_endpoints() + ->mutable_endpoint() + ->mutable_address() + ->mutable_socket_address(); + endpoint_address->set_address("1.2.3.4"); + endpoint_address->set_port_value(80); + } + + { + auto* endpoints = cluster_load_assignment.add_endpoints(); + auto* locality = endpoints->mutable_locality(); + locality->set_region("space"); + locality->set_zone("station"); + locality->set_sub_zone("international"); + + auto* endpoint_address = endpoints->add_lb_endpoints() + ->mutable_endpoint() + ->mutable_address() + ->mutable_socket_address(); + endpoint_address->set_address("1.2.3.5"); + endpoint_address->set_port_value(80); + } + + { + auto* endpoints = cluster_load_assignment.add_endpoints(); + auto* locality = endpoints->mutable_locality(); + locality->set_region("sugar"); + locality->set_zone("candy"); + locality->set_sub_zone("mountain"); + endpoints->mutable_load_balancing_weight()->set_value(37); + + auto* endpoint_address = endpoints->add_lb_endpoints() + ->mutable_endpoint() + 
->mutable_address() + ->mutable_socket_address(); + endpoint_address->set_address("1.2.3.6"); + endpoint_address->set_port_value(80); + } + + initialize(); + doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); + EXPECT_TRUE(initialized_); + + const auto& locality_weights = + *cluster_->prioritySet().hostSetsPerPriority()[0]->localityWeights(); + EXPECT_EQ(3, locality_weights.size()); + EXPECT_EQ(42, locality_weights[0]); + EXPECT_EQ(0, locality_weights[1]); + EXPECT_EQ(37, locality_weights[2]); + } +}; + // Validate that onConfigUpdate() propagates locality weights to the host set when locality // weighted balancing is configured. -TEST_F(EdsTest, EndpointLocalityWeights) { - envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment; - cluster_load_assignment.set_cluster_name("fare"); - resetCluster(R"EOF( +TEST_F(EdsLocalityWeightsTest, WeightsPresentWithLocalityWeightedConfig) { + expectLocalityWeightsPresentForClusterConfig(R"EOF( name: name connect_timeout: 0.25s type: EDS @@ -1523,66 +1588,32 @@ TEST_F(EdsTest, EndpointLocalityWeights) { cluster_names: - eds refresh_delay: 1s - )EOF", - Cluster::InitializePhase::Secondary); - - { - auto* endpoints = cluster_load_assignment.add_endpoints(); - auto* locality = endpoints->mutable_locality(); - locality->set_region("oceania"); - locality->set_zone("hello"); - locality->set_sub_zone("world"); - endpoints->mutable_load_balancing_weight()->set_value(42); - - auto* endpoint_address = endpoints->add_lb_endpoints() - ->mutable_endpoint() - ->mutable_address() - ->mutable_socket_address(); - endpoint_address->set_address("1.2.3.4"); - endpoint_address->set_port_value(80); - } - - { - auto* endpoints = cluster_load_assignment.add_endpoints(); - auto* locality = endpoints->mutable_locality(); - locality->set_region("space"); - locality->set_zone("station"); - locality->set_sub_zone("international"); - - auto* endpoint_address = endpoints->add_lb_endpoints() - ->mutable_endpoint() - ->mutable_address() 
- ->mutable_socket_address(); - endpoint_address->set_address("1.2.3.5"); - endpoint_address->set_port_value(80); - } - - { - auto* endpoints = cluster_load_assignment.add_endpoints(); - auto* locality = endpoints->mutable_locality(); - locality->set_region("sugar"); - locality->set_zone("candy"); - locality->set_sub_zone("mountain"); - endpoints->mutable_load_balancing_weight()->set_value(37); - - auto* endpoint_address = endpoints->add_lb_endpoints() - ->mutable_endpoint() - ->mutable_address() - ->mutable_socket_address(); - endpoint_address->set_address("1.2.3.6"); - endpoint_address->set_port_value(80); - } - - initialize(); - doOnConfigUpdateVerifyNoThrow(cluster_load_assignment); - EXPECT_TRUE(initialized_); + )EOF"); +} - const auto& locality_weights = - *cluster_->prioritySet().hostSetsPerPriority()[0]->localityWeights(); - EXPECT_EQ(3, locality_weights.size()); - EXPECT_EQ(42, locality_weights[0]); - EXPECT_EQ(0, locality_weights[1]); - EXPECT_EQ(37, locality_weights[2]); +// Validate that onConfigUpdate() propagates locality weights to the host set when the cluster uses +// load balancing policy extensions. +TEST_F(EdsLocalityWeightsTest, WeightsPresentWithLoadBalancingPolicyConfig) { + // envoy.load_balancers.custom_lb is registered by linking in + // //test/integration/load_balancers:custom_lb_policy. 
+ expectLocalityWeightsPresentForClusterConfig(R"EOF( + name: name + connect_timeout: 0.25s + type: EDS + lb_policy: LOAD_BALANCING_POLICY_CONFIG + load_balancing_policy: + policies: + - typed_extension_config: + name: envoy.load_balancers.custom_lb + eds_cluster_config: + service_name: fare + eds_config: + api_config_source: + api_type: REST + cluster_names: + - eds + refresh_delay: 1s + )EOF"); } // Validate that onConfigUpdate() removes any locality not referenced in the diff --git a/test/common/upstream/hds_test.cc b/test/common/upstream/hds_test.cc index 7f0801af4a6f8..48c673784d2b9 100644 --- a/test/common/upstream/hds_test.cc +++ b/test/common/upstream/hds_test.cc @@ -93,8 +93,7 @@ class HdsTest : public testing::Test { .WillRepeatedly(testing::ReturnNew>()); hds_delegate_ = std::make_unique( - stats_store_, Grpc::RawAsyncClientPtr(async_client_), - envoy::config::core::v3::ApiVersion::AUTO, dispatcher_, runtime_, stats_store_, + stats_store_, Grpc::RawAsyncClientPtr(async_client_), dispatcher_, runtime_, stats_store_, ssl_context_manager_, test_factory_, log_manager_, cm_, local_info_, admin_, singleton_manager_, tls_, validation_visitor_, *api_, options_); } diff --git a/test/common/upstream/health_checker_impl_test.cc b/test/common/upstream/health_checker_impl_test.cc index 973eb95cacd0a..cc5fa210e69b5 100644 --- a/test/common/upstream/health_checker_impl_test.cc +++ b/test/common/upstream/health_checker_impl_test.cc @@ -147,9 +147,9 @@ class HttpHealthCheckerImplTest : public Event::TestUsingSimulatedTime, absl::node_hash_map; - void allocHealthChecker(const std::string& yaml, bool avoid_boosting = true) { + void allocHealthChecker(const std::string& yaml) { health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV3Yaml(yaml, avoid_boosting), dispatcher_, runtime_, random_, + *cluster_, parseHealthCheckFromV3Yaml(yaml), dispatcher_, runtime_, random_, HealthCheckEventLoggerPtr(event_logger_storage_.release())); } @@ -363,7 +363,7 @@ class 
HttpHealthCheckerImplTest : public Event::TestUsingSimulatedTime, addCompletionCallback(); } - void setupDeprecatedServiceNameValidationHC(const std::string& prefix) { + void setupServiceNameValidationHC(const std::string& prefix) { std::string yaml = fmt::format(R"EOF( timeout: 1s interval: 1s @@ -2076,6 +2076,7 @@ TEST_F(HttpHealthCheckerImplTest, DynamicAddAndRemove) { HostVector removed{cluster_->prioritySet().getMockHostSet(0)->hosts_.back()}; cluster_->prioritySet().getMockHostSet(0)->hosts_.clear(); EXPECT_CALL(*test_sessions_[0]->client_connection_, close(_)); + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)); cluster_->prioritySet().getMockHostSet(0)->runCallbacks({}, removed); } @@ -2948,9 +2949,9 @@ class TestProdHttpHealthChecker : public ProdHttpHealthCheckerImpl { class ProdHttpHealthCheckerTest : public testing::Test, public HealthCheckerTestBase { public: - void allocHealthChecker(const std::string& yaml, bool avoid_boosting = true) { + void allocHealthChecker(const std::string& yaml) { health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV3Yaml(yaml, avoid_boosting), dispatcher_, runtime_, random_, + *cluster_, parseHealthCheckFromV3Yaml(yaml), dispatcher_, runtime_, random_, HealthCheckEventLoggerPtr(event_logger_storage_.release())); } @@ -3010,8 +3011,7 @@ TEST_F(ProdHttpHealthCheckerTest, ProdHttpHealthCheckerH1HealthChecking) { health_checker_->createCodecClientForTest(std::move(connection_))->type()); } -TEST_F(HttpHealthCheckerImplTest, DEPRECATED_FEATURE_TEST(Http1CodecClient)) { - TestDeprecatedV2Api _deprecated_v2_api; +TEST_F(HttpHealthCheckerImplTest, Http1CodecClient) { const std::string yaml = R"EOF( timeout: 1s interval: 1s @@ -3023,16 +3023,15 @@ TEST_F(HttpHealthCheckerImplTest, DEPRECATED_FEATURE_TEST(Http1CodecClient)) { service_name_matcher: prefix: locations path: /healthcheck - use_http2: false + codec_client_type: Http1 )EOF"; - allocHealthChecker(yaml, false); + allocHealthChecker(yaml); 
addCompletionCallback(); EXPECT_EQ(Http::CodecType::HTTP1, health_checker_->codecClientType()); } -TEST_F(HttpHealthCheckerImplTest, DEPRECATED_FEATURE_TEST(Http2CodecClient)) { - TestDeprecatedV2Api _deprecated_v2_api; +TEST_F(HttpHealthCheckerImplTest, Http2CodecClient) { const std::string yaml = R"EOF( timeout: 1s interval: 1s @@ -3044,18 +3043,18 @@ TEST_F(HttpHealthCheckerImplTest, DEPRECATED_FEATURE_TEST(Http2CodecClient)) { service_name_matcher: prefix: locations path: /healthcheck - use_http2: true + codec_client_type: Http2 )EOF"; - allocHealthChecker(yaml, false); + allocHealthChecker(yaml); addCompletionCallback(); EXPECT_EQ(Http::CodecType::HTTP2, health_checker_->codecClientType()); } -TEST_F(HttpHealthCheckerImplTest, DEPRECATED_FEATURE_TEST(ServiceNameMatch)) { +TEST_F(HttpHealthCheckerImplTest, ServiceNameMatch) { const std::string host = "fake_cluster"; const std::string path = "/healthcheck"; - setupDeprecatedServiceNameValidationHC("locations"); + setupServiceNameValidationHC("locations"); EXPECT_CALL(runtime_.snapshot_, featureEnabled("health_check.verify_cluster", 100)) .WillOnce(Return(true)); @@ -3087,8 +3086,8 @@ TEST_F(HttpHealthCheckerImplTest, DEPRECATED_FEATURE_TEST(ServiceNameMatch)) { EXPECT_EQ(Host::Health::Healthy, cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->health()); } -TEST_F(HttpHealthCheckerImplTest, DEPRECATED_FEATURE_TEST(ServiceNameMismatch)) { - setupDeprecatedServiceNameValidationHC("locations"); +TEST_F(HttpHealthCheckerImplTest, ServiceNameMismatch) { + setupServiceNameValidationHC("locations"); EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); EXPECT_CALL(runtime_.snapshot_, featureEnabled("health_check.verify_cluster", 100)) .WillOnce(Return(true)); @@ -3372,9 +3371,9 @@ class TcpHealthCheckerImplTest : public testing::Test, public HealthCheckerTestBase, public Event::TestUsingSimulatedTime { public: - void allocHealthChecker(const std::string& yaml, bool avoid_boosting = true) { + void 
allocHealthChecker(const std::string& yaml) { health_checker_ = std::make_shared( - *cluster_, parseHealthCheckFromV3Yaml(yaml, avoid_boosting), dispatcher_, runtime_, random_, + *cluster_, parseHealthCheckFromV3Yaml(yaml), dispatcher_, runtime_, random_, HealthCheckEventLoggerPtr(event_logger_storage_.release())); } @@ -4659,6 +4658,7 @@ TEST_F(GrpcHealthCheckerImplTest, DynamicAddAndRemove) { HostVector removed{cluster_->prioritySet().getMockHostSet(0)->hosts_.back()}; cluster_->prioritySet().getMockHostSet(0)->hosts_.clear(); EXPECT_CALL(*test_sessions_[0]->client_connection_, close(_)); + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)); cluster_->prioritySet().getMockHostSet(0)->runCallbacks({}, removed); } diff --git a/test/common/upstream/leds_test.cc b/test/common/upstream/leds_test.cc new file mode 100644 index 0000000000000..300c6c20ad8cd --- /dev/null +++ b/test/common/upstream/leds_test.cc @@ -0,0 +1,324 @@ +#include + +#include "envoy/config/endpoint/v3/endpoint.pb.h" +#include "envoy/config/endpoint/v3/endpoint_components.pb.h" +#include "envoy/stats/scope.h" + +#include "source/common/config/utility.h" +#include "source/common/config/xds_resource.h" +#include "source/common/singleton/manager_impl.h" +#include "source/common/upstream/leds.h" + +#include "test/common/stats/stat_test_utility.h" +#include "test/mocks/local_info/mocks.h" +#include "test/mocks/protobuf/mocks.h" +#include "test/mocks/runtime/mocks.h" +#include "test/mocks/server/admin.h" +#include "test/mocks/server/instance.h" +#include "test/mocks/ssl/mocks.h" +#include "test/mocks/upstream/cluster_manager.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::AnyNumber; +using testing::Eq; + +namespace Envoy { +namespace Upstream { +namespace { + +class LedsTest : public testing::Test { +public: + LedsTest() : api_(Api::createApiForTest(stats_)) {} + + // Builds an LbEndpoint proto object. 
+ static envoy::config::endpoint::v3::LbEndpoint buildLbEndpoint(const std::string& address, + uint32_t port) { + return TestUtility::parseYaml(fmt::format(R"EOF( + endpoint: + address: + socket_address: + address: {} + port_value: {} + )EOF", + address, + port)); + } + + // Returns a list of added Resource objects as is being returned in a delta-xDS response. + static Protobuf::RepeatedPtrField + buildAddedResources(const std::vector& added_or_updated, + const std::vector& resources_names) { + Protobuf::RepeatedPtrField to_ret; + + ASSERT(added_or_updated.size() == resources_names.size()); + + for (size_t idx = 0; idx < added_or_updated.size(); ++idx) { + const auto& lb_endpoint = added_or_updated[idx]; + const auto& resource_name = resources_names[idx]; + auto* resource = to_ret.Add(); + resource->set_name(resource_name); + resource->set_version("1"); + resource->mutable_resource()->PackFrom(lb_endpoint); + } + + return to_ret; + } + + // Returns a list of removed resource names as is being returned in a delta-xDS response. + static Protobuf::RepeatedPtrField + buildRemovedResources(const std::vector& removed) { + return Protobuf::RepeatedPtrField{removed.begin(), removed.end()}; + } + + // Creates a leds configuration given a YAML string. + static envoy::config::endpoint::v3::LedsClusterLocalityConfig + makeLedsConfiguration(absl::string_view leds_config_yaml) { + // Set the LEDS config. 
+ envoy::config::endpoint::v3::LedsClusterLocalityConfig leds_config; + TestUtility::loadFromYaml(std::string(leds_config_yaml), leds_config); + + return leds_config; + } + + void initialize() { initialize(makeLedsConfiguration(DEFAULT_LEDS_CONFIG_YAML)); } + + void initialize(const envoy::config::endpoint::v3::LedsClusterLocalityConfig& leds_config) { + local_info_.node_.mutable_locality()->set_zone("us-east-1a"); + + cluster_scope_ = stats_.createScope("cluster.xds_cluster."); + Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( + admin_, ssl_context_manager_, *cluster_scope_, cm_, local_info_, dispatcher_, stats_, + singleton_manager_, tls_, validation_visitor_, *api_, options_); + + // Setup LEDS subscription. + EXPECT_CALL(cm_.subscription_factory_, + collectionSubscriptionFromUrl( + _, _, + Eq(envoy::config::endpoint::v3::LbEndpoint().GetDescriptor()->full_name()), _, + _, _)) + .Times(AnyNumber()) + .WillRepeatedly(Invoke( + [this, &leds_config](const xds::core::v3::ResourceLocator& locator_url, + const envoy::config::core::v3::ConfigSource&, absl::string_view, + Stats::Scope&, Envoy::Config::SubscriptionCallbacks& callbacks, + Envoy::Config::OpaqueResourceDecoder&) { + // Verify that the locator is correct. + Config::XdsResourceIdentifier::EncodeOptions encode_options; + encode_options.sort_context_params_ = true; + EXPECT_EQ(leds_config.leds_collection_name(), + Config::XdsResourceIdentifier::encodeUrl(locator_url, encode_options)); + // Set the callbacks, and verify that start() is called correctly. + auto ret = std::make_unique>(); + leds_callbacks_ = &callbacks; + EXPECT_CALL(*ret, start(_)) + .WillOnce(Invoke([](const absl::flat_hash_set& resource_names) { + // No resource names for a glob collection. 
+ EXPECT_EQ(resource_names.size(), 0); + })); + return ret; + })); + + leds_subscription_ = std::make_unique(leds_config, "xds_cluster", + factory_context, *cluster_scope_.get(), + [&]() { callbacks_called_counter_++; }); + } + + static void compareEndpointsMapContents( + const LedsSubscription::LbEndpointsMap& actual_map, + const std::vector>& + expected_contents) { + EXPECT_EQ(actual_map.size(), expected_contents.size()); + for (const auto& [resource_name, proto_value] : expected_contents) { + const auto map_it = actual_map.find(resource_name); + EXPECT_TRUE(map_it != actual_map.end()); + EXPECT_THAT(proto_value, ProtoEq(map_it->second)); + } + } + + static constexpr absl::string_view DEFAULT_LEDS_CONFIG_YAML{R"EOF( + leds_config: + api_config_source: + api_type: DELTA_GRPC + grpc_services: + envoy_grpc: + cluster_name: xds_cluster + resource_api_version: V3 + leds_collection_name: xdstp://test/envoy.config.endpoint.v3.LbEndpoint/foo-endpoints/* + )EOF"}; + + // Number of times the LEDS subscription callback was called. + uint32_t callbacks_called_counter_{0}; + Stats::TestUtil::TestStore stats_; + Ssl::MockContextManager ssl_context_manager_; + NiceMock cm_; + NiceMock dispatcher_; + Envoy::Stats::ScopePtr cluster_scope_; + LedsSubscriptionPtr leds_subscription_; + NiceMock random_; + NiceMock runtime_; + NiceMock local_info_; + NiceMock admin_; + Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()}; + NiceMock tls_; + NiceMock validation_visitor_; + Api::ApiPtr api_; + Server::MockOptions options_; + + Config::SubscriptionCallbacks* leds_callbacks_{}; +}; + +// Verify that a successful onConfigUpdate() calls the callback, and has all the +// endpoints. 
+TEST_F(LedsTest, OnConfigUpdateSuccess) { + initialize(); + const auto lb_endpoint = buildLbEndpoint("127.0.0.1", 12345); + const auto& added_resources = buildAddedResources( + {lb_endpoint}, {"xdstp://test/envoy.config.endpoint.v3.LbEndpoint/foo-endpoints/endpoint0"}); + const auto decoded_resources = + TestUtility::decodeResources(added_resources); + const Protobuf::RepeatedPtrField removed_resources; + leds_callbacks_->onConfigUpdate(decoded_resources.refvec_, removed_resources, ""); + EXPECT_EQ(1UL, callbacks_called_counter_); + EXPECT_TRUE(leds_subscription_->isUpdated()); + const auto& all_endpoints_map = leds_subscription_->getEndpointsMap(); + EXPECT_EQ(1UL, all_endpoints_map.size()); + EXPECT_TRUE(TestUtility::protoEqual(lb_endpoint, all_endpoints_map.begin()->second)); +} + +// Verify that onConfigUpdate() with empty LbEndpoints vector size ignores config. +TEST_F(LedsTest, OnConfigUpdateEmpty) { + initialize(); + EXPECT_FALSE(leds_subscription_->isUpdated()); + const Protobuf::RepeatedPtrField removed_resources; + leds_callbacks_->onConfigUpdate({}, removed_resources, ""); + EXPECT_EQ(1UL, stats_.counter("cluster.xds_cluster.leds.update_empty").value()); + // Verify that the callback was called even after an empty update. + EXPECT_EQ(1UL, callbacks_called_counter_); + EXPECT_TRUE(leds_subscription_->isUpdated()); + + // Verify that the second time an empty update arrives, the callback isn't called. + leds_callbacks_->onConfigUpdate({}, removed_resources, ""); + EXPECT_EQ(2UL, stats_.counter("cluster.xds_cluster.leds.update_empty").value()); + EXPECT_EQ(1UL, callbacks_called_counter_); +} + +// Verify that onConfigUpdateFailed() calls the callback. 
+TEST_F(LedsTest, OnConfigUpdateFailed) { + initialize(); + EXPECT_FALSE(leds_subscription_->isUpdated()); + const std::unique_ptr ex = std::make_unique("Update Failed"); + leds_callbacks_->onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::FetchTimedout, + ex.get()); + // Verify that the callback was called even after an failed update. + EXPECT_EQ(1UL, callbacks_called_counter_); + EXPECT_TRUE(leds_subscription_->isUpdated()); + EXPECT_EQ(0UL, leds_subscription_->getEndpointsMap().size()); +} + +// Verify that onConfigUpdateFailed() doesn't change the endpoints. +TEST_F(LedsTest, OnConfigUpdateFailedEndpoints) { + initialize(); + // Add an endpoint. + const auto lb_endpoint = buildLbEndpoint("127.0.0.1", 12345); + const auto& added_resources = buildAddedResources( + {lb_endpoint}, {"xdstp://test/envoy.config.endpoint.v3.LbEndpoint/foo-endpoints/endpoint0"}); + const auto decoded_resources = + TestUtility::decodeResources(added_resources); + const Protobuf::RepeatedPtrField removed_resources; + leds_callbacks_->onConfigUpdate(decoded_resources.refvec_, removed_resources, ""); + EXPECT_EQ(1UL, callbacks_called_counter_); + + // Verify there's an endpoint. + const auto& all_endpoints_map = leds_subscription_->getEndpointsMap(); + EXPECT_EQ(1UL, all_endpoints_map.size()); + EXPECT_TRUE(TestUtility::protoEqual(lb_endpoint, all_endpoints_map.begin()->second)); + + // Fail the config. + const std::unique_ptr ex = std::make_unique("Update Failed"); + leds_callbacks_->onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::FetchTimedout, + ex.get()); + EXPECT_EQ(2UL, callbacks_called_counter_); + + // Verify that the same endpoint exists. + const auto& all_endpoints_map2 = leds_subscription_->getEndpointsMap(); + EXPECT_EQ(1UL, all_endpoints_map2.size()); + EXPECT_TRUE(TestUtility::protoEqual(lb_endpoint, all_endpoints_map2.begin()->second)); +} + +// Verify the update of an endpoint. 
+TEST_F(LedsTest, UpdateEndpoint) { + initialize(); + // Add 2 endpoints. + const std::string lb_endpoint1_name{ + "xdstp://test/envoy.config.endpoint.v3.LbEndpoint/foo-endpoints/endpoint0"}; + const std::string lb_endpoint2_name{ + "xdstp://test/envoy.config.endpoint.v3.LbEndpoint/foo-endpoints/endpoint1"}; + const auto lb_endpoint1 = buildLbEndpoint("127.0.0.1", 12345); + const auto lb_endpoint2 = buildLbEndpoint("127.0.0.1", 54321); + const auto& added_resources = + buildAddedResources({lb_endpoint1, lb_endpoint2}, {lb_endpoint1_name, lb_endpoint2_name}); + const auto decoded_resources = + TestUtility::decodeResources(added_resources); + const Protobuf::RepeatedPtrField removed_resources; + leds_callbacks_->onConfigUpdate(decoded_resources.refvec_, removed_resources, ""); + EXPECT_EQ(1UL, callbacks_called_counter_); + EXPECT_TRUE(leds_subscription_->isUpdated()); + const auto& all_endpoints_map = leds_subscription_->getEndpointsMap(); + EXPECT_EQ(2UL, all_endpoints_map.size()); + compareEndpointsMapContents( + all_endpoints_map, {{lb_endpoint1_name, lb_endpoint1}, {lb_endpoint2_name, lb_endpoint2}}); + + // Update the first endpoint. + const auto lb_endpoint1_update = buildLbEndpoint("127.0.0.1", 12346); + const auto& updated_resources = buildAddedResources({lb_endpoint1_update}, {lb_endpoint1_name}); + const auto decoded_resources_update = + TestUtility::decodeResources(updated_resources); + leds_callbacks_->onConfigUpdate(decoded_resources_update.refvec_, removed_resources, ""); + EXPECT_EQ(2UL, callbacks_called_counter_); + EXPECT_TRUE(leds_subscription_->isUpdated()); + const auto& all_endpoints_update = leds_subscription_->getEndpointsMap(); + EXPECT_EQ(2UL, all_endpoints_update.size()); + compareEndpointsMapContents(all_endpoints_map, {{lb_endpoint1_name, lb_endpoint1_update}, + {lb_endpoint2_name, lb_endpoint2}}); +} + +// Verify adding 2 endpoints then removing one. +TEST_F(LedsTest, RemoveEndpoint) { + initialize(); + // Add 2 endpoints. 
+ const auto lb_endpoint1_name{ + "xdstp://test/envoy.config.endpoint.v3.LbEndpoint/foo-endpoints/endpoint0"}; + const auto lb_endpoint2_name{ + "xdstp://test/envoy.config.endpoint.v3.LbEndpoint/foo-endpoints/endpoint1"}; + const auto lb_endpoint1 = buildLbEndpoint("127.0.0.1", 12345); + const auto lb_endpoint2 = buildLbEndpoint("127.0.0.1", 54321); + const auto& added_resources = + buildAddedResources({lb_endpoint1, lb_endpoint2}, {lb_endpoint1_name, lb_endpoint2_name}); + const auto decoded_resources = + TestUtility::decodeResources(added_resources); + const Protobuf::RepeatedPtrField removed_resources; + leds_callbacks_->onConfigUpdate(decoded_resources.refvec_, removed_resources, ""); + EXPECT_EQ(1UL, callbacks_called_counter_); + EXPECT_TRUE(leds_subscription_->isUpdated()); + const auto& all_endpoints_map = leds_subscription_->getEndpointsMap(); + EXPECT_EQ(2UL, all_endpoints_map.size()); + compareEndpointsMapContents( + all_endpoints_map, {{lb_endpoint1_name, lb_endpoint1}, {lb_endpoint2_name, lb_endpoint2}}); + + // Remove the first endpoint. 
+ const auto& removed_resources_update = buildRemovedResources({lb_endpoint1_name}); + leds_callbacks_->onConfigUpdate({}, removed_resources_update, ""); + EXPECT_EQ(2UL, callbacks_called_counter_); + EXPECT_TRUE(leds_subscription_->isUpdated()); + const auto& all_endpoints_update = leds_subscription_->getEndpointsMap(); + EXPECT_EQ(1UL, all_endpoints_update.size()); + compareEndpointsMapContents(all_endpoints_map, {{lb_endpoint2_name, lb_endpoint2}}); +} + +} // namespace +} // namespace Upstream +} // namespace Envoy diff --git a/test/common/upstream/load_stats_reporter_test.cc b/test/common/upstream/load_stats_reporter_test.cc index 81edc2ed4377a..9790151c2c5e2 100644 --- a/test/common/upstream/load_stats_reporter_test.cc +++ b/test/common/upstream/load_stats_reporter_test.cc @@ -45,8 +45,7 @@ class LoadStatsReporterTest : public testing::Test { return response_timer_; })); load_stats_reporter_ = std::make_unique( - local_info_, cm_, stats_store_, Grpc::RawAsyncClientPtr(async_client_), - envoy::config::core::v3::ApiVersion::AUTO, dispatcher_); + local_info_, cm_, stats_store_, Grpc::RawAsyncClientPtr(async_client_), dispatcher_); } void expectSendMessage( diff --git a/test/common/upstream/logical_dns_cluster_test.cc b/test/common/upstream/logical_dns_cluster_test.cc index bd7f0805a7aca..9afdd78e71c92 100644 --- a/test/common/upstream/logical_dns_cluster_test.cc +++ b/test/common/upstream/logical_dns_cluster_test.cc @@ -44,11 +44,10 @@ class LogicalDnsClusterTest : public Event::TestUsingSimulatedTime, public testi protected: LogicalDnsClusterTest() : api_(Api::createApiForTest(stats_store_, random_)) {} - void setupFromV3Yaml(const std::string& yaml, bool avoid_boosting = true) { + void setupFromV3Yaml(const std::string& yaml) { resolve_timer_ = new Event::MockTimer(&dispatcher_); NiceMock cm; - envoy::config::cluster::v3::Cluster cluster_config = - parseClusterFromV3Yaml(yaml, avoid_boosting); + envoy::config::cluster::v3::Cluster cluster_config = 
parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_store_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() : cluster_config.alt_stat_name())); diff --git a/test/common/upstream/original_dst_cluster_test.cc b/test/common/upstream/original_dst_cluster_test.cc index 9b5cb0db5abe5..bd170104bb951 100644 --- a/test/common/upstream/original_dst_cluster_test.cc +++ b/test/common/upstream/original_dst_cluster_test.cc @@ -71,9 +71,7 @@ class OriginalDstClusterTest : public Event::TestUsingSimulatedTime, public test : cleanup_timer_(new Event::MockTimer(&dispatcher_)), api_(Api::createApiForTest(stats_store_)) {} - void setupFromYaml(const std::string& yaml, bool avoid_boosting = true) { - setup(parseClusterFromV3Yaml(yaml, avoid_boosting)); - } + void setupFromYaml(const std::string& yaml) { setup(parseClusterFromV3Yaml(yaml)); } void setup(const envoy::config::cluster::v3::Cluster& cluster_config) { NiceMock cm; @@ -210,7 +208,7 @@ TEST_F(OriginalDstClusterTest, NoContext) { { NiceMock connection; TestLoadBalancerContext lb_context(&connection); - connection.stream_info_.downstream_address_provider_->restoreLocalAddress( + connection.stream_info_.downstream_connection_info_provider_->restoreLocalAddress( std::make_shared("unix://foo")); OriginalDstCluster::LoadBalancer lb(cluster_); @@ -245,7 +243,7 @@ TEST_F(OriginalDstClusterTest, Membership) { NiceMock connection; TestLoadBalancerContext lb_context(&connection); - connection.stream_info_.downstream_address_provider_->restoreLocalAddress( + connection.stream_info_.downstream_connection_info_provider_->restoreLocalAddress( std::make_shared("10.10.11.11")); Event::PostCb post_cb; @@ -256,7 +254,7 @@ TEST_F(OriginalDstClusterTest, Membership) { auto cluster_hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts(); ASSERT_NE(host, nullptr); - EXPECT_EQ(*connection.addressProvider().localAddress(), *host->address()); + 
EXPECT_EQ(*connection.connectionInfoProvider().localAddress(), *host->address()); EXPECT_EQ(1UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size()); EXPECT_EQ(1UL, cluster_->prioritySet().hostSetsPerPriority()[0]->healthyHosts().size()); @@ -266,7 +264,7 @@ TEST_F(OriginalDstClusterTest, Membership) { cluster_->prioritySet().hostSetsPerPriority()[0]->healthyHostsPerLocality().get().size()); EXPECT_EQ(host, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()[0]); - EXPECT_EQ(*connection.addressProvider().localAddress(), + EXPECT_EQ(*connection.connectionInfoProvider().localAddress(), *cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()[0]->address()); // Same host is returned on the 2nd call @@ -334,12 +332,12 @@ TEST_F(OriginalDstClusterTest, Membership2) { NiceMock connection1; TestLoadBalancerContext lb_context1(&connection1); - connection1.stream_info_.downstream_address_provider_->restoreLocalAddress( + connection1.stream_info_.downstream_connection_info_provider_->restoreLocalAddress( std::make_shared("10.10.11.11")); NiceMock connection2; TestLoadBalancerContext lb_context2(&connection2); - connection2.stream_info_.downstream_address_provider_->restoreLocalAddress( + connection2.stream_info_.downstream_connection_info_provider_->restoreLocalAddress( std::make_shared("10.10.11.12")); OriginalDstCluster::LoadBalancer lb(cluster_); @@ -349,14 +347,14 @@ TEST_F(OriginalDstClusterTest, Membership2) { HostConstSharedPtr host1 = lb.chooseHost(&lb_context1); post_cb(); ASSERT_NE(host1, nullptr); - EXPECT_EQ(*connection1.addressProvider().localAddress(), *host1->address()); + EXPECT_EQ(*connection1.connectionInfoProvider().localAddress(), *host1->address()); EXPECT_CALL(membership_updated_, ready()); EXPECT_CALL(dispatcher_, post(_)).WillOnce(SaveArg<0>(&post_cb)); HostConstSharedPtr host2 = lb.chooseHost(&lb_context2); post_cb(); ASSERT_NE(host2, nullptr); - EXPECT_EQ(*connection2.addressProvider().localAddress(), *host2->address()); + 
EXPECT_EQ(*connection2.connectionInfoProvider().localAddress(), *host2->address()); EXPECT_EQ(2UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size()); EXPECT_EQ(2UL, cluster_->prioritySet().hostSetsPerPriority()[0]->healthyHosts().size()); @@ -366,11 +364,11 @@ TEST_F(OriginalDstClusterTest, Membership2) { cluster_->prioritySet().hostSetsPerPriority()[0]->healthyHostsPerLocality().get().size()); EXPECT_EQ(host1, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()[0]); - EXPECT_EQ(*connection1.addressProvider().localAddress(), + EXPECT_EQ(*connection1.connectionInfoProvider().localAddress(), *cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()[0]->address()); EXPECT_EQ(host2, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()[1]); - EXPECT_EQ(*connection2.addressProvider().localAddress(), + EXPECT_EQ(*connection2.connectionInfoProvider().localAddress(), *cluster_->prioritySet().hostSetsPerPriority()[0]->hosts()[1]->address()); auto cluster_hosts = cluster_->prioritySet().hostSetsPerPriority()[0]->hosts(); @@ -423,7 +421,7 @@ TEST_F(OriginalDstClusterTest, Connection) { // Connection to the host is made to the downstream connection's local address. 
NiceMock connection; TestLoadBalancerContext lb_context(&connection); - connection.stream_info_.downstream_address_provider_->restoreLocalAddress( + connection.stream_info_.downstream_connection_info_provider_->restoreLocalAddress( std::make_shared("FD00::1")); OriginalDstCluster::LoadBalancer lb(cluster_); @@ -432,10 +430,11 @@ TEST_F(OriginalDstClusterTest, Connection) { HostConstSharedPtr host = lb.chooseHost(&lb_context); post_cb(); ASSERT_NE(host, nullptr); - EXPECT_EQ(*connection.addressProvider().localAddress(), *host->address()); + EXPECT_EQ(*connection.connectionInfoProvider().localAddress(), *host->address()); - EXPECT_CALL(dispatcher_, createClientConnection_( - PointeesEq(connection.addressProvider().localAddress()), _, _, _)) + EXPECT_CALL(dispatcher_, + createClientConnection_( + PointeesEq(connection.connectionInfoProvider().localAddress()), _, _, _)) .WillOnce(Return(new NiceMock())); host->createConnection(dispatcher_, nullptr, nullptr); } @@ -473,7 +472,7 @@ TEST_F(OriginalDstClusterTest, MultipleClusters) { // Connection to the host is made to the downstream connection's local address. 
NiceMock connection; TestLoadBalancerContext lb_context(&connection); - connection.stream_info_.downstream_address_provider_->restoreLocalAddress( + connection.stream_info_.downstream_connection_info_provider_->restoreLocalAddress( std::make_shared("FD00::1")); OriginalDstCluster::LoadBalancer lb(cluster_); @@ -482,7 +481,7 @@ TEST_F(OriginalDstClusterTest, MultipleClusters) { HostConstSharedPtr host = lb.chooseHost(&lb_context); post_cb(); ASSERT_NE(host, nullptr); - EXPECT_EQ(*connection.addressProvider().localAddress(), *host->address()); + EXPECT_EQ(*connection.connectionInfoProvider().localAddress(), *host->address()); EXPECT_EQ(1UL, cluster_->prioritySet().hostSetsPerPriority()[0]->hosts().size()); // Check that 'second' also gets updated @@ -587,7 +586,7 @@ TEST_F(OriginalDstClusterTest, UseHttpHeaderDisabled) { // Downstream connection with original_dst filter, HTTP header override ignored. NiceMock connection1; - connection1.stream_info_.downstream_address_provider_->restoreLocalAddress( + connection1.stream_info_.downstream_connection_info_provider_->restoreLocalAddress( std::make_shared("10.10.11.11")); TestLoadBalancerContext lb_context1(&connection1, Http::Headers::get().EnvoyOriginalDstHost.get(), "127.0.0.1:5555"); @@ -597,11 +596,11 @@ TEST_F(OriginalDstClusterTest, UseHttpHeaderDisabled) { HostConstSharedPtr host1 = lb.chooseHost(&lb_context1); post_cb(); ASSERT_NE(host1, nullptr); - EXPECT_EQ(*connection1.addressProvider().localAddress(), *host1->address()); + EXPECT_EQ(*connection1.connectionInfoProvider().localAddress(), *host1->address()); // Downstream connection without original_dst filter, HTTP header override ignored. 
NiceMock connection2; - connection2.stream_info_.downstream_address_provider_->setLocalAddress( + connection2.stream_info_.downstream_connection_info_provider_->setLocalAddress( std::make_shared("10.10.11.11")); TestLoadBalancerContext lb_context2(&connection2, Http::Headers::get().EnvoyOriginalDstHost.get(), "127.0.0.1:5555"); @@ -613,7 +612,7 @@ TEST_F(OriginalDstClusterTest, UseHttpHeaderDisabled) { // Downstream connection over Unix Domain Socket, HTTP header override ignored. NiceMock connection3; - connection3.stream_info_.downstream_address_provider_->setLocalAddress( + connection3.stream_info_.downstream_connection_info_provider_->setLocalAddress( std::make_shared("unix://foo")); TestLoadBalancerContext lb_context3(&connection3, Http::Headers::get().EnvoyOriginalDstHost.get(), "127.0.0.1:5555"); diff --git a/test/common/upstream/scheduler_benchmark.cc b/test/common/upstream/scheduler_benchmark.cc new file mode 100644 index 0000000000000..7d1ed311d76d6 --- /dev/null +++ b/test/common/upstream/scheduler_benchmark.cc @@ -0,0 +1,185 @@ +#include +#include +#include +#include + +#include "source/common/common/random_generator.h" +#include "source/common/upstream/edf_scheduler.h" +#include "source/common/upstream/wrsq_scheduler.h" + +#include "test/benchmark/main.h" + +#include "benchmark/benchmark.h" + +namespace Envoy { +namespace Upstream { +namespace { + +class SchedulerTester { +public: + struct ObjInfo { + double weight; + }; + + static std::vector> + setupSplitWeights(Scheduler& sched, size_t num_objs, ::benchmark::State& state) { + std::vector> info; + + state.PauseTiming(); + for (uint32_t i = 0; i < num_objs; ++i) { + auto oi = std::make_shared(); + if (i < num_objs / 2) { + oi->weight = static_cast(1); + } else { + oi->weight = static_cast(4); + } + + info.emplace_back(oi); + } + + std::shuffle(info.begin(), info.end(), std::default_random_engine()); + state.ResumeTiming(); + + for (auto& oi : info) { + sched.add(oi->weight, oi); + } + + return info; + 
} + + static std::vector> + setupUniqueWeights(Scheduler& sched, size_t num_objs, ::benchmark::State& state) { + std::vector> info; + + state.PauseTiming(); + for (uint32_t i = 0; i < num_objs; ++i) { + auto oi = std::make_shared(); + oi->weight = static_cast(i + 1); + + info.emplace_back(oi); + } + + std::shuffle(info.begin(), info.end(), std::default_random_engine()); + state.ResumeTiming(); + + for (auto& oi : info) { + sched.add(oi->weight, oi); + } + + return info; + } + + static void + pickTest(Scheduler& sched, ::benchmark::State& state, + std::function>(Scheduler&)> setup) { + std::vector> obj_info; + for (auto _ : state) { // NOLINT: Silences warning about dead store + if (obj_info.empty()) { + obj_info = setup(sched); + } + + sched.pickAndAdd([](const auto& i) { return i.weight; }); + } + } +}; + +void splitWeightAddEdf(::benchmark::State& state) { + EdfScheduler edf; + const size_t num_objs = state.range(0); + for (auto _ : state) { // NOLINT: Silences warning about dead store + SchedulerTester::setupSplitWeights(edf, num_objs, state); + } +} + +void uniqueWeightAddEdf(::benchmark::State& state) { + EdfScheduler edf; + const size_t num_objs = state.range(0); + for (auto _ : state) { // NOLINT: Silences warning about dead store + SchedulerTester::setupUniqueWeights(edf, num_objs, state); + } +} + +void splitWeightPickEdf(::benchmark::State& state) { + EdfScheduler edf; + const size_t num_objs = state.range(0); + + SchedulerTester::pickTest(edf, state, + [num_objs, &state](Scheduler& sched) { + return SchedulerTester::setupSplitWeights(sched, num_objs, state); + }); +} + +void uniqueWeightPickEdf(::benchmark::State& state) { + EdfScheduler edf; + const size_t num_objs = state.range(0); + + SchedulerTester::pickTest(edf, state, + [num_objs, &state](Scheduler& sched) { + return SchedulerTester::setupUniqueWeights(sched, num_objs, state); + }); +} + +void splitWeightAddWRSQ(::benchmark::State& state) { + Random::RandomGeneratorImpl random; + WRSQScheduler 
wrsq(random); + const size_t num_objs = state.range(0); + for (auto _ : state) { // NOLINT: Silences warning about dead store + SchedulerTester::setupSplitWeights(wrsq, num_objs, state); + } +} + +void uniqueWeightAddWRSQ(::benchmark::State& state) { + Random::RandomGeneratorImpl random; + WRSQScheduler wrsq(random); + const size_t num_objs = state.range(0); + for (auto _ : state) { // NOLINT: Silences warning about dead store + SchedulerTester::setupUniqueWeights(wrsq, num_objs, state); + } +} + +void splitWeightPickWRSQ(::benchmark::State& state) { + Random::RandomGeneratorImpl random; + WRSQScheduler wrsq(random); + const size_t num_objs = state.range(0); + + SchedulerTester::pickTest(wrsq, state, + [num_objs, &state](Scheduler& sched) { + return SchedulerTester::setupSplitWeights(sched, num_objs, state); + }); +} + +void uniqueWeightPickWRSQ(::benchmark::State& state) { + Random::RandomGeneratorImpl random; + WRSQScheduler wrsq(random); + const size_t num_objs = state.range(0); + + SchedulerTester::pickTest(wrsq, state, + [num_objs, &state](Scheduler& sched) { + return SchedulerTester::setupUniqueWeights(sched, num_objs, state); + }); +} + +BENCHMARK(splitWeightAddEdf) + ->Unit(::benchmark::kMicrosecond) + ->RangeMultiplier(8) + ->Range(1 << 6, 1 << 14); +BENCHMARK(splitWeightAddWRSQ) + ->Unit(::benchmark::kMicrosecond) + ->RangeMultiplier(8) + ->Range(1 << 6, 1 << 14); +BENCHMARK(splitWeightPickEdf)->RangeMultiplier(8)->Range(1 << 6, 1 << 14); +BENCHMARK(splitWeightPickWRSQ)->RangeMultiplier(8)->Range(1 << 6, 1 << 14); +BENCHMARK(uniqueWeightAddEdf) + ->Unit(::benchmark::kMicrosecond) + ->RangeMultiplier(8) + ->Range(1 << 6, 1 << 14); +BENCHMARK(uniqueWeightAddWRSQ) + ->Unit(::benchmark::kMicrosecond) + ->RangeMultiplier(8) + ->Range(1 << 6, 1 << 14); +BENCHMARK(uniqueWeightPickEdf)->RangeMultiplier(8)->Range(1 << 6, 1 << 14); +BENCHMARK(uniqueWeightPickWRSQ)->RangeMultiplier(8)->Range(1 << 6, 1 << 14); + +} // namespace +} // namespace Upstream +} // 
namespace Envoy diff --git a/test/common/upstream/test_cluster_manager.h b/test/common/upstream/test_cluster_manager.h index b0a30974f3b2c..463f4fb2a9222 100644 --- a/test/common/upstream/test_cluster_manager.h +++ b/test/common/upstream/test_cluster_manager.h @@ -215,7 +215,7 @@ class MockedUpdatedClusterManagerImpl : public TestClusterManagerImpl { } } - void postThreadLocalDrainConnections(const Cluster&, const HostVector& hosts_removed) override { + void postThreadLocalRemoveHosts(const Cluster&, const HostVector& hosts_removed) override { local_hosts_removed_.post(hosts_removed); } diff --git a/test/common/upstream/upstream_impl_test.cc b/test/common/upstream/upstream_impl_test.cc index 969fd4211e966..029b12bc33d8b 100644 --- a/test/common/upstream/upstream_impl_test.cc +++ b/test/common/upstream/upstream_impl_test.cc @@ -1,6 +1,7 @@ #include #include #include +#include #include #include #include @@ -1307,6 +1308,80 @@ TEST_F(HostImplTest, HostnameCanaryAndLocality) { EXPECT_EQ(1, host.priority()); } +TEST_F(HostImplTest, CreateConnection) { + MockClusterMockPrioritySet cluster; + envoy::config::core::v3::Metadata metadata; + Config::Metadata::mutableMetadataValue(metadata, Config::MetadataFilters::get().ENVOY_LB, + Config::MetadataEnvoyLbKeys::get().CANARY) + .set_bool_value(true); + envoy::config::core::v3::Locality locality; + locality.set_region("oceania"); + locality.set_zone("hello"); + locality.set_sub_zone("world"); + Network::Address::InstanceConstSharedPtr address = + Network::Utility::resolveUrl("tcp://10.0.0.1:1234"); + auto host = std::make_shared( + cluster.info_, "lyft.com", address, + std::make_shared(metadata), 1, locality, + envoy::config::endpoint::v3::Endpoint::HealthCheckConfig::default_instance(), 1, + envoy::config::core::v3::UNKNOWN, simTime()); + + testing::StrictMock dispatcher; + Network::TransportSocketOptionsConstSharedPtr transport_socket_options; + Network::ConnectionSocket::OptionsSharedPtr options; + 
Network::MockTransportSocketFactory socket_factory; + + auto connection = new testing::StrictMock(); + EXPECT_CALL(*connection, setBufferLimits(0)); + EXPECT_CALL(dispatcher, createClientConnection_(_, _, _, _)).WillOnce(Return(connection)); + + Envoy::Upstream::Host::CreateConnectionData connection_data = + host->createConnection(dispatcher, options, transport_socket_options); + EXPECT_EQ(connection, connection_data.connection_.get()); +} + +TEST_F(HostImplTest, CreateConnectionHappyEyeballs) { + MockClusterMockPrioritySet cluster; + envoy::config::core::v3::Metadata metadata; + Config::Metadata::mutableMetadataValue(metadata, Config::MetadataFilters::get().ENVOY_LB, + Config::MetadataEnvoyLbKeys::get().CANARY) + .set_bool_value(true); + envoy::config::core::v3::Locality locality; + locality.set_region("oceania"); + locality.set_zone("hello"); + locality.set_sub_zone("world"); + Network::Address::InstanceConstSharedPtr address = + Network::Utility::resolveUrl("tcp://10.0.0.1:1234"); + auto host = std::make_shared( + cluster.info_, "lyft.com", address, + std::make_shared(metadata), 1, locality, + envoy::config::endpoint::v3::Endpoint::HealthCheckConfig::default_instance(), 1, + envoy::config::core::v3::UNKNOWN, simTime()); + + testing::StrictMock dispatcher; + Network::TransportSocketOptionsConstSharedPtr transport_socket_options; + Network::ConnectionSocket::OptionsSharedPtr options; + Network::MockTransportSocketFactory socket_factory; + + std::vector address_list = { + Network::Utility::resolveUrl("tcp://10.0.0.1:1235"), + address, + }; + host->setAddressList(address_list); + auto connection = new testing::StrictMock(); + EXPECT_CALL(*connection, setBufferLimits(0)); + EXPECT_CALL(*connection, addConnectionCallbacks(_)); + // The underlying connection should be created with the first address in the list. 
+ EXPECT_CALL(dispatcher, createClientConnection_(address_list[0], _, _, _)) + .WillOnce(Return(connection)); + EXPECT_CALL(dispatcher, createTimer_(_)); + + Envoy::Upstream::Host::CreateConnectionData connection_data = + host->createConnection(dispatcher, options, transport_socket_options); + // The created connection will be wrapped in a HappyEyeballsConnectionImpl. + EXPECT_NE(connection, connection_data.connection_.get()); +} + TEST_F(HostImplTest, HealthFlags) { MockClusterMockPrioritySet cluster; HostSharedPtr host = makeTestHost(cluster.info_, "tcp://10.0.0.1:1234", simTime(), 1); @@ -2191,7 +2266,7 @@ TEST(PrioritySet, Extend) { auto member_update_cb = priority_set.addMemberUpdateCb( [&](const HostVector&, const HostVector&) -> void { ++membership_changes; }); - // The initial priority set starts with priority level 0.. + // The initial priority set starts with priority level 0. EXPECT_EQ(1, priority_set.hostSetsPerPriority().size()); EXPECT_EQ(0, priority_set.hostSetsPerPriority()[0]->hosts().size()); EXPECT_EQ(0, priority_set.hostSetsPerPriority()[0]->priority()); @@ -2215,21 +2290,26 @@ TEST(PrioritySet, Extend) { HostVectorSharedPtr hosts( new HostVector({makeTestHost(info, "tcp://127.0.0.1:80", *time_source)})); HostsPerLocalitySharedPtr hosts_per_locality = std::make_shared(); + HostMapConstSharedPtr fake_cross_priority_host_map = std::make_shared(); { HostVector hosts_added{hosts->front()}; HostVector hosts_removed{}; - priority_set.updateHosts(1, - updateHostsParams(hosts, hosts_per_locality, - std::make_shared(*hosts), - hosts_per_locality), - {}, hosts_added, hosts_removed, absl::nullopt); + priority_set.updateHosts( + 1, + updateHostsParams(hosts, hosts_per_locality, + std::make_shared(*hosts), hosts_per_locality), + {}, hosts_added, hosts_removed, absl::nullopt, fake_cross_priority_host_map); } EXPECT_EQ(1, priority_changes); EXPECT_EQ(1, membership_changes); EXPECT_EQ(last_priority, 1); EXPECT_EQ(1, 
priority_set.hostSetsPerPriority()[1]->hosts().size()); + // Simply verify the set and get the cross-priority host map is working properly in the priority + // set. + EXPECT_EQ(fake_cross_priority_host_map.get(), priority_set.crossPriorityHostMap().get()); + // Test iteration. int i = 0; for (auto& host_set : priority_set.hostSetsPerPriority()) { @@ -2254,13 +2334,79 @@ TEST(PrioritySet, Extend) { EXPECT_EQ(2, membership_changes); } +// Helper class used to test MainPrioritySetImpl. +class TestMainPrioritySetImpl : public MainPrioritySetImpl { +public: + HostMapConstSharedPtr constHostMapForTest() { return const_cross_priority_host_map_; } + HostMapSharedPtr mutableHostMapForTest() { return mutable_cross_priority_host_map_; } +}; + +// Test that the priority set in the main thread can work correctly. +TEST(PrioritySet, MainPrioritySetTest) { + TestMainPrioritySetImpl priority_set; + priority_set.getOrCreateHostSet(0); + + std::shared_ptr info{new NiceMock()}; + auto time_source = std::make_unique>(); + HostVectorSharedPtr hosts( + new HostVector({makeTestHost(info, "tcp://127.0.0.1:80", *time_source)})); + HostsPerLocalitySharedPtr hosts_per_locality = std::make_shared(); + + // The host map is initially empty or null. + EXPECT_TRUE(priority_set.constHostMapForTest()->empty()); + EXPECT_EQ(nullptr, priority_set.mutableHostMapForTest().get()); + + { + HostVector hosts_added{hosts->front()}; + HostVector hosts_removed{}; + + priority_set.updateHosts(1, + updateHostsParams(hosts, hosts_per_locality, + std::make_shared(*hosts), + hosts_per_locality), + {}, hosts_added, hosts_removed, absl::nullopt); + } + + // Only mutable host map can be updated directly. Read only host map will not be updated before + // `crossPriorityHostMap` is called. + EXPECT_TRUE(priority_set.constHostMapForTest()->empty()); + EXPECT_FALSE(priority_set.mutableHostMapForTest()->empty()); + + // Mutable host map will be moved to read only host map after `crossPriorityHostMap` is called. 
+ HostMapSharedPtr host_map = priority_set.mutableHostMapForTest(); + EXPECT_EQ(host_map.get(), priority_set.crossPriorityHostMap().get()); + EXPECT_EQ(nullptr, priority_set.mutableHostMapForTest().get()); + + { + HostVector hosts_added{}; + HostVector hosts_removed{hosts->front()}; + + priority_set.updateHosts(1, + updateHostsParams(hosts, hosts_per_locality, + std::make_shared(*hosts), + hosts_per_locality), + {}, hosts_added, hosts_removed, absl::nullopt); + } + + // New mutable host map will be created and all update will be applied to new mutable host map. + // Read only host map will not be updated before `crossPriorityHostMap` is called. + EXPECT_EQ(host_map.get(), priority_set.constHostMapForTest().get()); + EXPECT_TRUE((priority_set.mutableHostMapForTest().get() != nullptr && + priority_set.mutableHostMapForTest().get() != host_map.get())); + + // Again, mutable host map will be moved to read only host map after `crossPriorityHostMap` is + // called. + host_map = priority_set.mutableHostMapForTest(); + EXPECT_EQ(host_map.get(), priority_set.crossPriorityHostMap().get()); + EXPECT_EQ(nullptr, priority_set.mutableHostMapForTest().get()); +} + class ClusterInfoImplTest : public testing::Test { public: ClusterInfoImplTest() : api_(Api::createApiForTest(stats_, random_)) {} - std::unique_ptr makeCluster(const std::string& yaml, - bool avoid_boosting = true) { - cluster_config_ = parseClusterFromV3Yaml(yaml, avoid_boosting); + std::unique_ptr makeCluster(const std::string& yaml) { + cluster_config_ = parseClusterFromV3Yaml(yaml); scope_ = stats_.createScope(fmt::format("cluster.{}.", cluster_config_.alt_stat_name().empty() ? 
cluster_config_.name() : cluster_config_.alt_stat_name())); @@ -2521,7 +2667,7 @@ TEST_F(ClusterInfoImplTest, ExtensionProtocolOptionsForUnknownFilter) { option: "value" )EOF"; - EXPECT_THROW_WITH_MESSAGE(makeCluster(yaml, false), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(makeCluster(yaml), EnvoyException, "Didn't find a registered network or http filter or " "protocol options implementation for name: 'no_such_filter'"); } @@ -2729,7 +2875,7 @@ TEST_F(ClusterInfoImplTest, Timeouts) { } { auto cluster2 = makeCluster(yaml + explicit_timeout_new); - EXPECT_THROW_WITH_REGEX(makeCluster(yaml + explicit_timeout_bad, false), EnvoyException, + EXPECT_THROW_WITH_REGEX(makeCluster(yaml + explicit_timeout_bad), EnvoyException, ".*Proto constraint validation failed.*"); } const std::string no_timeout = R"EOF( @@ -2987,13 +3133,13 @@ TEST_F(ClusterInfoImplTest, ExtensionProtocolOptionsForFilterWithoutOptions) { TestNetworkFilterConfigFactory factory(factoryBase); Registry::InjectFactory registry( factory); - EXPECT_THROW_WITH_MESSAGE(makeCluster(yaml, false), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(makeCluster(yaml), EnvoyException, "filter envoy.test.filter does not support protocol options"); } { TestHttpFilterConfigFactory factory(factoryBase); Registry::InjectFactory registry(factory); - EXPECT_THROW_WITH_MESSAGE(makeCluster(yaml, false), EnvoyException, + EXPECT_THROW_WITH_MESSAGE(makeCluster(yaml), EnvoyException, "filter envoy.test.filter does not support protocol options"); } } @@ -3131,7 +3277,7 @@ TEST_F(ClusterInfoImplTest, ExtensionProtocolOptionsForFilterWithOptions) { } } -TEST_F(ClusterInfoImplTest, UseDownstreamHttpProtocol) { +TEST_F(ClusterInfoImplTest, UseDownstreamHttpProtocolWithDowngrade) { const std::string yaml = R"EOF( name: name connect_timeout: 0.25s @@ -3148,7 +3294,8 @@ TEST_F(ClusterInfoImplTest, UseDownstreamHttpProtocol) { cluster->info()->upstreamHttpProtocol({Http::Protocol::Http11})[0]); EXPECT_EQ(Http::Protocol::Http2, 
cluster->info()->upstreamHttpProtocol({Http::Protocol::Http2})[0]); - EXPECT_EQ(Http::Protocol::Http3, + // This will get downgraded because the cluster does not support HTTP/3 + EXPECT_EQ(Http::Protocol::Http2, cluster->info()->upstreamHttpProtocol({Http::Protocol::Http3})[0]); } @@ -3367,6 +3514,58 @@ TEST_F(ClusterInfoImplTest, Http3Auto) { auto_h3->info()->http3Options().quic_protocol_options().max_concurrent_streams().value(), 2); } +TEST_F(ClusterInfoImplTest, UseDownstreamHttpProtocolWithoutDowngrade) { + const std::string yaml = TestEnvironment::substitute(R"EOF( + name: name + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: MAGLEV + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: foo.bar.com + port_value: 443 + transport_socket: + name: envoy.transport_sockets.quic + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.quic.v3.QuicUpstreamTransport + upstream_tls_context: + common_tls_context: + tls_certificates: + - certificate_chain: + filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem" + private_key: + filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem" + validation_context: + trusted_ca: + filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem" + match_subject_alt_names: + - exact: localhost + - exact: 127.0.0.1 + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + use_downstream_protocol_config: + http3_protocol_options: {} + common_http_protocol_options: + idle_timeout: 1s + )EOF", + Network::Address::IpVersion::v4); + auto cluster = makeCluster(yaml); + + EXPECT_EQ(Http::Protocol::Http10, + cluster->info()->upstreamHttpProtocol({Http::Protocol::Http10})[0]); + EXPECT_EQ(Http::Protocol::Http11, + 
cluster->info()->upstreamHttpProtocol({Http::Protocol::Http11})[0]); + EXPECT_EQ(Http::Protocol::Http2, + cluster->info()->upstreamHttpProtocol({Http::Protocol::Http2})[0]); + EXPECT_EQ(Http::Protocol::Http3, + cluster->info()->upstreamHttpProtocol({Http::Protocol::Http3})[0]); +} + #else TEST_F(ClusterInfoImplTest, Http3BadConfig) { const std::string yaml = TestEnvironment::substitute(R"EOF( diff --git a/test/common/upstream/utility.h b/test/common/upstream/utility.h index e3f3a18a11f72..673d9349d8055 100644 --- a/test/common/upstream/utility.h +++ b/test/common/upstream/utility.h @@ -55,23 +55,21 @@ inline std::string defaultStaticClusterJson(const std::string& name) { } inline envoy::config::bootstrap::v3::Bootstrap -parseBootstrapFromV3Json(const std::string& json_string, bool avoid_boosting = true) { +parseBootstrapFromV3Json(const std::string& json_string) { envoy::config::bootstrap::v3::Bootstrap bootstrap; - TestUtility::loadFromJson(json_string, bootstrap, true, avoid_boosting); + TestUtility::loadFromJson(json_string, bootstrap); return bootstrap; } -inline envoy::config::cluster::v3::Cluster parseClusterFromV3Json(const std::string& json_string, - bool avoid_boosting = true) { +inline envoy::config::cluster::v3::Cluster parseClusterFromV3Json(const std::string& json_string) { envoy::config::cluster::v3::Cluster cluster; - TestUtility::loadFromJson(json_string, cluster, true, avoid_boosting); + TestUtility::loadFromJson(json_string, cluster); return cluster; } -inline envoy::config::cluster::v3::Cluster parseClusterFromV3Yaml(const std::string& yaml, - bool avoid_boosting = true) { +inline envoy::config::cluster::v3::Cluster parseClusterFromV3Yaml(const std::string& yaml) { envoy::config::cluster::v3::Cluster cluster; - TestUtility::loadFromYaml(yaml, cluster, true, avoid_boosting); + TestUtility::loadFromYaml(yaml, cluster); return cluster; } @@ -166,9 +164,9 @@ makeLocalityWeights(std::initializer_list locality_weights) { } inline 
envoy::config::core::v3::HealthCheck -parseHealthCheckFromV3Yaml(const std::string& yaml_string, bool avoid_boosting = true) { +parseHealthCheckFromV3Yaml(const std::string& yaml_string) { envoy::config::core::v3::HealthCheck health_check; - TestUtility::loadFromYamlAndValidate(yaml_string, health_check, false, avoid_boosting); + TestUtility::loadFromYamlAndValidate(yaml_string, health_check); return health_check; } diff --git a/test/common/upstream/wrsq_scheduler_test.cc b/test/common/upstream/wrsq_scheduler_test.cc new file mode 100644 index 0000000000000..17c40058b21a7 --- /dev/null +++ b/test/common/upstream/wrsq_scheduler_test.cc @@ -0,0 +1,291 @@ +#include "source/common/upstream/wrsq_scheduler.h" + +#include "test/mocks/common.h" +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +using testing::NiceMock; +using testing::Return; + +namespace Envoy { +namespace Upstream { +namespace { + +TEST(WRSQSchedulerTest, Empty) { + NiceMock random; + WRSQScheduler sched(random); + EXPECT_EQ(nullptr, sched.peekAgain([](const double&) { return 1; })); + EXPECT_EQ(nullptr, sched.pickAndAdd([](const double&) { return 1; })); +} + +// Validate we get regular RR behavior when all weights are the same. +TEST(WRSQSchedulerTest, Unweighted) { + NiceMock random; + WRSQScheduler sched(random); + constexpr uint32_t num_entries = 128; + std::shared_ptr entries[num_entries]; + + for (uint32_t i = 0; i < num_entries; ++i) { + entries[i] = std::make_shared(i); + sched.add(1, entries[i]); + } + + for (uint32_t rounds = 0; rounds < 128; ++rounds) { + for (uint32_t i = 0; i < num_entries; ++i) { + auto peek = sched.peekAgain([](const double&) { return 1; }); + auto p = sched.pickAndAdd([](const double&) { return 1; }); + EXPECT_EQ(i, *p); + EXPECT_EQ(*peek, *p); + } + } +} + +// Validate selection probabilities. 
+TEST(WRSQSchedulerTest, ProbabilityVerification) { + Random::MockRandomGenerator random; + WRSQScheduler sched(random); + constexpr uint32_t num_entries = 16; + std::shared_ptr entries[num_entries]; + uint32_t pick_count[num_entries]; + + double weight_sum = 0; + + for (uint32_t i = 0; i < num_entries; ++i) { + entries[i] = std::make_shared(i); + sched.add(i + 1, entries[i]); + weight_sum += (i + 1); + pick_count[i] = 0; + } + + // If we try every random number between 0 and the weight sum, we should select each object the + // number of times equal to its weight. + for (uint32_t i = 0; i < weight_sum; ++i) { + EXPECT_CALL(random, random()).WillOnce(Return(i)); + auto peek = sched.peekAgain([](const double& x) { return x + 1; }); + auto p = sched.pickAndAdd([](const double& x) { return x + 1; }); + EXPECT_EQ(*p, *peek); + ++pick_count[*p]; + } + + for (uint32_t i = 0; i < num_entries; ++i) { + EXPECT_EQ(i + 1, pick_count[i]); + } +} + +// Validate that expired entries are ignored. +TEST(WRSQSchedulerTest, Expired1) { + Random::MockRandomGenerator random; + WRSQScheduler sched(random); + + auto second_entry = std::make_shared(42); + { + auto first_entry = std::make_shared(37); + sched.add(1000, first_entry); + sched.add(1, second_entry); + } + + EXPECT_CALL(random, random()).WillOnce(Return(0)).WillOnce(Return(299)).WillOnce(Return(1337)); + auto peek = sched.peekAgain({}); + auto p1 = sched.pickAndAdd({}); + auto p2 = sched.pickAndAdd({}); + EXPECT_EQ(*peek, *p1); + EXPECT_EQ(*second_entry, *p1); + EXPECT_EQ(*second_entry, *p2); +} + +// Validate that expired entries on either end of "the good one" are ignored. 
+TEST(WRSQSchedulerTest, Expired2) { + Random::MockRandomGenerator random; + WRSQScheduler sched(random); + + auto second_entry = std::make_shared(42); + { + auto first_entry = std::make_shared(37); + auto third_entry = std::make_shared(22); + sched.add(1000, first_entry); + sched.add(1, second_entry); + sched.add(100, third_entry); + } + + EXPECT_CALL(random, random()) + .WillOnce(Return(0)) + .WillOnce(Return(299)) + .WillOnce(Return(1337)) + .WillOnce(Return(8675309)); + auto peek = sched.peekAgain({}); + auto p1 = sched.pickAndAdd({}); + auto p2 = sched.pickAndAdd({}); + EXPECT_EQ(*peek, *p1); + EXPECT_EQ(*second_entry, *p1); + EXPECT_EQ(*second_entry, *p2); +} + +// Validate that expired entries are not peeked. +TEST(WRSQSchedulerTest, ExpiredPeek) { + NiceMock random; + WRSQScheduler sched(random); + + { + auto second_entry = std::make_shared(42); + auto first_entry = std::make_shared(37); + sched.add(2, first_entry); + sched.add(1, second_entry); + } + auto third_entry = std::make_shared(99); + sched.add(3, third_entry); + + EXPECT_EQ(*third_entry, *sched.peekAgain({})); +} + +// Validate that expired entries are ignored. +TEST(WRSQSchedulerTest, ExpiredPeekedIsNotPicked) { + NiceMock random; + WRSQScheduler sched(random); + + { + auto second_entry = std::make_shared(42); + auto first_entry = std::make_shared(37); + sched.add(2, first_entry); + sched.add(1, second_entry); + for (int i = 0; i < 3; ++i) { + EXPECT_TRUE(sched.peekAgain({}) != nullptr); + } + } + + EXPECT_TRUE(sched.peekAgain({}) == nullptr); + EXPECT_TRUE(sched.pickAndAdd({}) == nullptr); +} + +// Ensure the multiple values that are peeked are the same ones returned via calls to `pickAndAdd`. +// This test also verifies that the scheduler behavior is vanilla round-robin when all of the +// weights are identical by ensuring the same values are selected across 2 different schedulers. 
+TEST(WRSQSchedulerTest, ManyPeekahead) { + NiceMock random; + WRSQScheduler sched1(random); + WRSQScheduler sched2(random); + constexpr uint32_t num_entries = 128; + std::shared_ptr entries[num_entries]; + + // Populate the schedulers. + for (uint32_t i = 0; i < num_entries; ++i) { + entries[i] = std::make_shared(i); + sched1.add(1, entries[i]); + sched2.add(1, entries[i]); + } + + // Peek values and store them for comparison later. + std::vector picks; + for (uint32_t rounds = 0; rounds < 10; ++rounds) { + picks.push_back(*sched1.peekAgain({})); + } + + // Verify the picked values are those we peeked earlier. We'll also verify both schedulers are + // returning the same values, since we expect vanilla round-robin behavior. + for (uint32_t rounds = 0; rounds < 10; ++rounds) { + auto p1 = sched1.pickAndAdd({}); + auto p2 = sched2.pickAndAdd({}); + EXPECT_EQ(picks[rounds], *p1); + EXPECT_EQ(*p2, *p1); + } +} + +// Expire all objects and verify nullptr is returned. +TEST(WRSQSchedulerTest, ExpireAll) { + Random::MockRandomGenerator random; + WRSQScheduler sched(random); + + // The weights are small enough that we can just burn through all the relevant random numbers that + // would be generated as long as we hit 12 consecutive numbers for each part. + uint32_t rnum{0}; + + { + // Add objects of the same weight. + auto e1 = std::make_shared(42); + auto e2 = std::make_shared(37); + sched.add(1, e1); + sched.add(1, e2); + + { + auto e3 = std::make_shared(7); + auto e4 = std::make_shared(13); + sched.add(5, e3); + sched.add(5, e4); + + // We've got unexpired values, so we should be able to pick them. While we're at it, we can + // check we're getting objects from both weight queues. 
+ uint32_t weight1pick{0}, weight5pick{0}; + for (int i = 0; i < 1000; ++i) { + EXPECT_CALL(random, random()).WillOnce(Return(rnum++)); + switch (*sched.pickAndAdd({})) { + case 42: + case 37: + ++weight1pick; + break; + case 7: + case 13: + ++weight5pick; + break; + default: + FAIL() << "bogus value returned"; + } + } + EXPECT_GT(weight5pick, 0); + EXPECT_GT(weight1pick, 0); + } + + // Expired the entirety of the high-probability queue. Let's make sure we behave properly by + // expiring them and only returning the unexpired entries. + for (int i = 0; i < 1000; ++i) { + EXPECT_CALL(random, random()).WillRepeatedly(Return(rnum++)); + switch (*sched.peekAgain({})) { + case 42: + case 37: + break; + default: + FAIL() << "bogus value returned"; + } + } + } + + // All values have expired, so only nullptr should be returned. + EXPECT_CALL(random, random()).WillRepeatedly(Return(rnum++)); + EXPECT_EQ(sched.peekAgain({}), nullptr); +} + +// Validate that a new requested weight is honored. +TEST(WRSQSchedulerTest, ChangingWeight) { + NiceMock random; + WRSQScheduler sched(random); + + auto e1 = std::make_shared(123); + auto e2 = std::make_shared(456); + auto e3 = std::make_shared(789); + sched.add(1, e1); + sched.add(0, e2); + + // Expecting only e1 to be picked. Weights are {e1=1, e2=0}. + for (uint32_t rounds = 0; rounds < 128; ++rounds) { + auto peek = sched.peekAgain({}); + auto p = sched.pickAndAdd({}); + EXPECT_EQ(*e1, *p); + EXPECT_EQ(*peek, *p); + } + + // Weights should be unchanged at this point. Still expect to pick e1, but now we'll change it to + // be 0. + auto p = sched.pickAndAdd([](auto) { return 0.0; }); + EXPECT_EQ(*e1, *p); + sched.add(1, e3); + + // Weights are now {e1=0, e2=0, e3=1}.
Without changing the weights, e3 should be the one picked + // repeatedly + for (uint32_t rounds = 0; rounds < 128; ++rounds) { + auto p = sched.pickAndAdd({}); + EXPECT_EQ(*e3, *p); + } +} + +} // namespace +} // namespace Upstream +} // namespace Envoy diff --git a/test/config/BUILD b/test/config/BUILD index fe8cf3472c9a0..9e5807cad1712 100644 --- a/test/config/BUILD +++ b/test/config/BUILD @@ -44,3 +44,15 @@ envoy_cc_test_library( "@envoy_api//envoy/service/discovery/v3:pkg_cc_proto", ], ) + +envoy_cc_test_library( + name = "v2_link_hacks", + hdrs = ["v2_link_hacks.h"], + deps = [ + "@envoy_api//envoy/api/v2:pkg_cc_proto", + "@envoy_api//envoy/config/bootstrap/v2:pkg_cc_proto", + "@envoy_api//envoy/service/discovery/v2:pkg_cc_proto", + "@envoy_api//envoy/service/ratelimit/v2:pkg_cc_proto", + ], + alwayslink = 1, +) diff --git a/test/config/utility.cc b/test/config/utility.cc index ac21e0fc31644..4c39561331fa1 100644 --- a/test/config/utility.cc +++ b/test/config/utility.cc @@ -355,24 +355,19 @@ std::string ConfigHelper::discoveredClustersBootstrap(const std::string& api_typ } // TODO(#6327) cleaner approach to testing with static config. -std::string ConfigHelper::adsBootstrap(const std::string& api_type, - envoy::config::core::v3::ApiVersion resource_api_version, - envoy::config::core::v3::ApiVersion transport_api_version) { +std::string ConfigHelper::adsBootstrap(const std::string& api_type) { // We use this to allow tests to default to having a single API version but override and make // the transport/resource API version distinction when needed. 
- if (transport_api_version == envoy::config::core::v3::ApiVersion::AUTO) { - transport_api_version = resource_api_version; - } return fmt::format(R"EOF( dynamic_resources: lds_config: - resource_api_version: {1} + resource_api_version: V3 ads: {{}} cds_config: - resource_api_version: {1} + resource_api_version: V3 ads: {{}} ads_config: - transport_api_version: {2} + transport_api_version: V3 api_type: {0} set_node_on_first_message_only: true static_resources: @@ -401,16 +396,13 @@ std::string ConfigHelper::adsBootstrap(const std::string& api_type, - name: envoy.access_loggers.file typed_config: "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog - path: "{3}" + path: "{1}" address: socket_address: address: 127.0.0.1 port_value: 0 )EOF", - api_type, - resource_api_version == envoy::config::core::v3::ApiVersion::V2 ? "V2" : "V3", - transport_api_version == envoy::config::core::v3::ApiVersion::V2 ? "V2" : "V3", - Platform::null_device_path); + api_type, Platform::null_device_path); } // TODO(samflattery): bundle this up with buildCluster @@ -440,9 +432,8 @@ ConfigHelper::buildStaticCluster(const std::string& name, int port, const std::s address, port)); } -envoy::config::cluster::v3::Cluster -ConfigHelper::buildCluster(const std::string& name, const std::string& lb_policy, - envoy::config::core::v3::ApiVersion api_version) { +envoy::config::cluster::v3::Cluster ConfigHelper::buildCluster(const std::string& name, + const std::string& lb_policy) { API_NO_BOOST(envoy::config::cluster::v3::Cluster) cluster; TestUtility::loadFromYaml(fmt::format(R"EOF( name: {} @@ -450,7 +441,7 @@ ConfigHelper::buildCluster(const std::string& name, const std::string& lb_policy type: EDS eds_cluster_config: eds_config: - resource_api_version: {} + resource_api_version: V3 ads: {{}} lb_policy: {} typed_extension_protocol_options: @@ -459,14 +450,13 @@ ConfigHelper::buildCluster(const std::string& name, const std::string& lb_policy explicit_http_config: 
http2_protocol_options: {{}} )EOF", - name, apiVersionStr(api_version), lb_policy), - cluster, shouldBoost(api_version)); + name, lb_policy), + cluster); return cluster; } -envoy::config::cluster::v3::Cluster -ConfigHelper::buildTlsCluster(const std::string& name, const std::string& lb_policy, - envoy::config::core::v3::ApiVersion api_version) { +envoy::config::cluster::v3::Cluster ConfigHelper::buildTlsCluster(const std::string& name, + const std::string& lb_policy) { API_NO_BOOST(envoy::config::cluster::v3::Cluster) cluster; TestUtility::loadFromYaml( fmt::format(R"EOF( @@ -475,7 +465,7 @@ ConfigHelper::buildTlsCluster(const std::string& name, const std::string& lb_pol type: EDS eds_cluster_config: eds_config: - resource_api_version: {} + resource_api_version: V3 ads: {{}} transport_socket: name: envoy.transport_sockets.tls @@ -492,17 +482,16 @@ ConfigHelper::buildTlsCluster(const std::string& name, const std::string& lb_pol explicit_http_config: http2_protocol_options: {{}} )EOF", - name, apiVersionStr(api_version), + name, TestEnvironment::runfilesPath("test/config/integration/certs/upstreamcacert.pem"), lb_policy), - cluster, shouldBoost(api_version)); + cluster); return cluster; } envoy::config::endpoint::v3::ClusterLoadAssignment ConfigHelper::buildClusterLoadAssignment(const std::string& name, const std::string& address, - uint32_t port, - envoy::config::core::v3::ApiVersion api_version) { + uint32_t port) { API_NO_BOOST(envoy::config::endpoint::v3::ClusterLoadAssignment) cluster_load_assignment; TestUtility::loadFromYaml(fmt::format(R"EOF( cluster_name: {} @@ -515,14 +504,13 @@ ConfigHelper::buildClusterLoadAssignment(const std::string& name, const std::str port_value: {} )EOF", name, address, port), - cluster_load_assignment, shouldBoost(api_version)); + cluster_load_assignment); return cluster_load_assignment; } envoy::config::listener::v3::Listener ConfigHelper::buildBaseListener(const std::string& name, const std::string& address, - const std::string& 
filter_chains, - envoy::config::core::v3::ApiVersion api_version) { + const std::string& filter_chains) { API_NO_BOOST(envoy::config::listener::v3::Listener) listener; TestUtility::loadFromYaml(fmt::format( R"EOF( @@ -535,14 +523,14 @@ ConfigHelper::buildBaseListener(const std::string& name, const std::string& addr {} )EOF", name, address, filter_chains), - listener, shouldBoost(api_version)); + listener); return listener; } -envoy::config::listener::v3::Listener -ConfigHelper::buildListener(const std::string& name, const std::string& route_config, - const std::string& address, const std::string& stat_prefix, - envoy::config::core::v3::ApiVersion api_version) { +envoy::config::listener::v3::Listener ConfigHelper::buildListener(const std::string& name, + const std::string& route_config, + const std::string& address, + const std::string& stat_prefix) { std::string hcm = fmt::format( R"EOF( filters: @@ -554,17 +542,16 @@ ConfigHelper::buildListener(const std::string& name, const std::string& route_co rds: route_config_name: {} config_source: - resource_api_version: {} + resource_api_version: V3 ads: {{}} http_filters: [{{ name: envoy.filters.http.router }}] )EOF", - stat_prefix, route_config, apiVersionStr(api_version)); - return buildBaseListener(name, address, hcm, api_version); + stat_prefix, route_config); + return buildBaseListener(name, address, hcm); } envoy::config::route::v3::RouteConfiguration -ConfigHelper::buildRouteConfig(const std::string& name, const std::string& cluster, - envoy::config::core::v3::ApiVersion api_version) { +ConfigHelper::buildRouteConfig(const std::string& name, const std::string& cluster) { API_NO_BOOST(envoy::config::route::v3::RouteConfiguration) route; TestUtility::loadFromYaml(fmt::format(R"EOF( name: "{}" @@ -576,7 +563,7 @@ ConfigHelper::buildRouteConfig(const std::string& name, const std::string& clust route: {{ cluster: "{}" }} )EOF", name, cluster), - route, shouldBoost(api_version)); + route); return route; } diff --git 
a/test/config/utility.h b/test/config/utility.h index cdfdc80eeb8b3..06865cc36518a 100644 --- a/test/config/utility.h +++ b/test/config/utility.h @@ -144,39 +144,32 @@ class ConfigHelper { // Configuration for L7 proxying, with clusters cluster_1 and cluster_2 meant to be added via CDS. // api_type should be REST, GRPC, or DELTA_GRPC. static std::string discoveredClustersBootstrap(const std::string& api_type); - static std::string adsBootstrap(const std::string& api_type, - envoy::config::core::v3::ApiVersion resource_api_version, - envoy::config::core::v3::ApiVersion transport_api_version = - envoy::config::core::v3::ApiVersion::AUTO); + static std::string adsBootstrap(const std::string& api_type); // Builds a standard Cluster config fragment, with a single endpoint (at address:port). static envoy::config::cluster::v3::Cluster buildStaticCluster(const std::string& name, int port, const std::string& address); // ADS configurations - static envoy::config::cluster::v3::Cluster buildCluster( - const std::string& name, const std::string& lb_policy = "ROUND_ROBIN", - envoy::config::core::v3::ApiVersion api_version = envoy::config::core::v3::ApiVersion::V3); + static envoy::config::cluster::v3::Cluster + buildCluster(const std::string& name, const std::string& lb_policy = "ROUND_ROBIN"); - static envoy::config::cluster::v3::Cluster buildTlsCluster( - const std::string& name, const std::string& lb_policy = "ROUND_ROBIN", - envoy::config::core::v3::ApiVersion api_version = envoy::config::core::v3::ApiVersion::V3); + static envoy::config::cluster::v3::Cluster + buildTlsCluster(const std::string& name, const std::string& lb_policy = "ROUND_ROBIN"); - static envoy::config::endpoint::v3::ClusterLoadAssignment buildClusterLoadAssignment( - const std::string& name, const std::string& ip_version, uint32_t port, - envoy::config::core::v3::ApiVersion api_version = envoy::config::core::v3::ApiVersion::V3); + static envoy::config::endpoint::v3::ClusterLoadAssignment + 
buildClusterLoadAssignment(const std::string& name, const std::string& ip_version, uint32_t port); - static envoy::config::listener::v3::Listener buildBaseListener( - const std::string& name, const std::string& address, const std::string& filter_chains = "", - envoy::config::core::v3::ApiVersion api_version = envoy::config::core::v3::ApiVersion::V3); + static envoy::config::listener::v3::Listener + buildBaseListener(const std::string& name, const std::string& address, + const std::string& filter_chains = ""); - static envoy::config::listener::v3::Listener buildListener( - const std::string& name, const std::string& route_config, const std::string& address, - const std::string& stat_prefix, - envoy::config::core::v3::ApiVersion api_version = envoy::config::core::v3::ApiVersion::V3); + static envoy::config::listener::v3::Listener buildListener(const std::string& name, + const std::string& route_config, + const std::string& address, + const std::string& stat_prefix); - static envoy::config::route::v3::RouteConfiguration buildRouteConfig( - const std::string& name, const std::string& cluster, - envoy::config::core::v3::ApiVersion api_version = envoy::config::core::v3::ApiVersion::V3); + static envoy::config::route::v3::RouteConfiguration buildRouteConfig(const std::string& name, + const std::string& cluster); // Builds a standard Endpoint suitable for population by finalize(). static envoy::config::endpoint::v3::Endpoint buildEndpoint(const std::string& address); @@ -338,14 +331,6 @@ class ConfigHelper { size_t http3_max_stream_receive_window); private: - static bool shouldBoost(envoy::config::core::v3::ApiVersion api_version) { - return api_version == envoy::config::core::v3::ApiVersion::V2; - } - - static std::string apiVersionStr(envoy::config::core::v3::ApiVersion api_version) { - return api_version == envoy::config::core::v3::ApiVersion::V2 ? "V2" : "V3"; - } - // Load the first HCM struct from the first listener into a parsed proto. 
bool loadHttpConnectionManager(HttpConnectionManager& hcm); // Take the contents of the provided HCM proto and stuff them into the first HCM diff --git a/test/config/v2_link_hacks.h b/test/config/v2_link_hacks.h new file mode 100644 index 0000000000000..63108af3435ae --- /dev/null +++ b/test/config/v2_link_hacks.h @@ -0,0 +1,32 @@ +#pragma once + +#include "envoy/api/v2/cds.pb.h" +#include "envoy/api/v2/discovery.pb.h" +#include "envoy/api/v2/eds.pb.h" +#include "envoy/api/v2/lds.pb.h" +#include "envoy/api/v2/rds.pb.h" +#include "envoy/api/v2/srds.pb.h" +#include "envoy/config/bootstrap/v2/bootstrap.pb.h" +#include "envoy/service/discovery/v2/ads.pb.h" +#include "envoy/service/discovery/v2/hds.pb.h" +#include "envoy/service/discovery/v2/rtds.pb.h" +#include "envoy/service/discovery/v2/sds.pb.h" +#include "envoy/service/ratelimit/v2/rls.pb.h" + +namespace Envoy { + +// Hack to force linking of the service: https://github.com/google/protobuf/issues/4221. +// This file should be included ONLY if this hack is required. 
+const envoy::service::discovery::v2::AdsDummy _ads_dummy_v2; +const envoy::service::ratelimit::v2::RateLimitRequest _rls_dummy_v2; +const envoy::service::discovery::v2::SdsDummy _sds_dummy_v2; +const envoy::service::discovery::v2::RtdsDummy _tds_dummy_v2; +const envoy::api::v2::LdsDummy _lds_dummy_v2; +const envoy::api::v2::RdsDummy _rds_dummy_v2; +const envoy::api::v2::CdsDummy _cds_dummy_v2; +const envoy::api::v2::EdsDummy _eds_dummy_v2; +const envoy::api::v2::SrdsDummy _srds_dummy_v2; +const envoy::config::bootstrap::v2::Bootstrap _bootstrap_dummy_v2; +const envoy::service::discovery::v2::Capability _hds_dummy_v2; + +} // namespace Envoy diff --git a/test/config_test/BUILD b/test/config_test/BUILD index 929fa0eb3b5c7..7104a8d336b85 100644 --- a/test/config_test/BUILD +++ b/test/config_test/BUILD @@ -13,24 +13,33 @@ envoy_package() exports_files(["example_configs_test_setup.sh"]) -envoy_cc_test( - name = "example_configs_test", - size = "large", +envoy_cc_test_library( + name = "example_configs_test_lib", srcs = [ "example_configs_test.cc", ], - data = [ - "example_configs_test_setup.sh", - "//configs:example_configs", - ], deps = [ ":config_test_lib", "//source/common/filesystem:filesystem_lib", + "//test/config:v2_link_hacks", "//test/test_common:environment_lib", "//test/test_common:utility_lib", ], ) +envoy_cc_test( + name = "example_configs_test", + size = "large", + data = [ + "example_configs_test_setup.sh", + "//configs:example_configs", + ], + env = {"EXAMPLE_CONFIGS_TAR_PATH": "envoy/configs/example_configs.tar"}, + deps = [ + ":example_configs_test_lib", + ], +) + envoy_cc_test_library( name = "config_test_lib", srcs = ["config_test.cc"], diff --git a/test/config_test/config_test.cc b/test/config_test/config_test.cc index 5dc666dd6f476..876a501bd56ad 100644 --- a/test/config_test/config_test.cc +++ b/test/config_test/config_test.cc @@ -99,7 +99,7 @@ class ConfigTest { envoy::config::bootstrap::v3::Bootstrap bootstrap; 
Server::InstanceUtil::loadBootstrapConfig( bootstrap, options_, server_.messageValidationContext().staticValidationVisitor(), *api_); - Server::Configuration::InitialImpl initial_config(bootstrap, options); + Server::Configuration::InitialImpl initial_config(bootstrap); Server::Configuration::MainImpl main_config; cluster_manager_factory_ = std::make_unique( @@ -230,16 +230,12 @@ uint32_t run(const std::string& directory) { } void loadVersionedBootstrapFile(const std::string& filename, - envoy::config::bootstrap::v3::Bootstrap& bootstrap_message, - absl::optional bootstrap_version) { + envoy::config::bootstrap::v3::Bootstrap& bootstrap_message) { Api::ApiPtr api = Api::createApiForTest(); OptionsImpl options( Envoy::Server::createTestOptionsImpl(filename, "", Network::Address::IpVersion::v6)); // Avoid contention issues with other tests over the hot restart domain socket. options.setHotRestartDisabled(true); - if (bootstrap_version.has_value()) { - options.setBootstrapVersion(*bootstrap_version); - } Server::InstanceUtil::loadBootstrapConfig(bootstrap_message, options, ProtobufMessage::getStrictValidationVisitor(), *api); } diff --git a/test/config_test/config_test.h b/test/config_test/config_test.h index 551fffadce33e..e4ee26501fcc5 100644 --- a/test/config_test/config_test.h +++ b/test/config_test/config_test.h @@ -27,8 +27,7 @@ void testMerge(); * given bootstrap protobuf message using the server's loadBootstrapConfig. 
*/ void loadVersionedBootstrapFile(const std::string& filename, - envoy::config::bootstrap::v3::Bootstrap& bootstrap_message, - absl::optional bootstrap_version = absl::nullopt); + envoy::config::bootstrap::v3::Bootstrap& bootstrap_message); /** * Loads the given bootstrap proto into the given bootstrap protobuf message diff --git a/test/config_test/example_configs_test.cc b/test/config_test/example_configs_test.cc index 37e0d561d8dc1..02d0504fbd9a4 100644 --- a/test/config_test/example_configs_test.cc +++ b/test/config_test/example_configs_test.cc @@ -1,5 +1,6 @@ #include "source/common/filesystem/filesystem_impl.h" +#include "test/config/v2_link_hacks.h" #include "test/config_test/config_test.h" #include "test/test_common/environment.h" #include "test/test_common/utility.h" @@ -27,7 +28,9 @@ TEST(ExampleConfigsTest, All) { EXPECT_EQ(config_file_count, ConfigTest::run(directory)); - ConfigTest::testMerge(); + if (std::getenv("DISABLE_TEST_MERGE") == nullptr) { + ConfigTest::testMerge(); + } // Return to the original working directory, otherwise "bazel.coverage" breaks (...but why?). RELEASE_ASSERT(::chdir(cwd) == 0, ""); diff --git a/test/config_test/example_configs_test_setup.sh b/test/config_test/example_configs_test_setup.sh index 49c2066add89d..4271f90085303 100755 --- a/test/config_test/example_configs_test_setup.sh +++ b/test/config_test/example_configs_test_setup.sh @@ -4,7 +4,7 @@ set -e DIR="$TEST_TMPDIR"/test/config_test mkdir -p "$DIR" -tar -xvf "$TEST_SRCDIR"/envoy/configs/example_configs.tar -C "$DIR" +tar -xvf "$TEST_SRCDIR"/"$EXAMPLE_CONFIGS_TAR_PATH" -C "$DIR" # find uses full path to prevent using Windows find on Windows. 
/usr/bin/find "$DIR" -type f | grep -c .yaml > "$TEST_TMPDIR"/config-file-count.txt diff --git a/test/extensions/access_loggers/common/grpc_access_logger_test.cc b/test/extensions/access_loggers/common/grpc_access_logger_test.cc index 57f5f2278624a..f2e125df17e06 100644 --- a/test/extensions/access_loggers/common/grpc_access_logger_test.cc +++ b/test/extensions/access_loggers/common/grpc_access_logger_test.cc @@ -33,14 +33,12 @@ namespace { constexpr std::chrono::milliseconds FlushInterval(10); constexpr char MOCK_HTTP_LOG_FIELD_NAME[] = "http_log_entry"; constexpr char MOCK_TCP_LOG_FIELD_NAME[] = "tcp_log_entry"; -constexpr auto TRANSPORT_API_VERSION = envoy::config::core::v3::ApiVersion::AUTO; const Protobuf::MethodDescriptor& mockMethodDescriptor() { // The mock logger doesn't have its own API, but we only care about the method descriptor so we // use the ALS protos. - return Grpc::VersionedMethods("envoy.service.accesslog.v3.AccessLogService.StreamAccessLogs", - "envoy.service.accesslog.v2.AccessLogService.StreamAccessLogs") - .getMethodDescriptorForVersion(TRANSPORT_API_VERSION); + return *Protobuf::DescriptorPool::generated_pool()->FindMethodByName( + "envoy.service.accesslog.v3.AccessLogService.StreamAccessLogs"); } // We don't care about the actual log entries, as this logger just adds them to the proto, but we @@ -54,11 +52,9 @@ class MockGrpcAccessLoggerImpl std::chrono::milliseconds buffer_flush_interval_msec, uint64_t max_buffer_size_bytes, Event::Dispatcher& dispatcher, Stats::Scope& scope, std::string access_log_prefix, - const Protobuf::MethodDescriptor& service_method, - envoy::config::core::v3::ApiVersion transport_api_version) + const Protobuf::MethodDescriptor& service_method) : GrpcAccessLogger(std::move(client), buffer_flush_interval_msec, max_buffer_size_bytes, - dispatcher, scope, access_log_prefix, service_method, - transport_api_version) {} + dispatcher, scope, access_log_prefix, service_method) {} int numInits() const { return num_inits_; 
} @@ -121,8 +117,7 @@ class GrpcAccessLogTest : public testing::Test { EXPECT_CALL(*timer_, enableTimer(buffer_flush_interval_msec, _)); logger_ = std::make_unique( Grpc::RawAsyncClientPtr{async_client_}, buffer_flush_interval_msec, buffer_size_bytes, - dispatcher_, stats_store_, "mock_access_log_prefix.", mockMethodDescriptor(), - TRANSPORT_API_VERSION); + dispatcher_, stats_store_, "mock_access_log_prefix.", mockMethodDescriptor()); } void expectStreamStart(MockAccessLogStream& stream, AccessLogCallbacks** callbacks_to_set) { @@ -324,13 +319,13 @@ class MockGrpcAccessLoggerCache private: // Common::GrpcAccessLoggerCache MockGrpcAccessLoggerImpl::SharedPtr - createLogger(const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, - envoy::config::core::v3::ApiVersion, const Grpc::RawAsyncClientSharedPtr& client, + createLogger(const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig&, + const Grpc::RawAsyncClientSharedPtr& client, std::chrono::milliseconds buffer_flush_interval_msec, uint64_t max_buffer_size_bytes, Event::Dispatcher& dispatcher, Stats::Scope& scope) override { return std::make_shared( std::move(client), buffer_flush_interval_msec, max_buffer_size_bytes, dispatcher, scope, - "mock_access_log_prefix.", mockMethodDescriptor(), config.transport_api_version()); + "mock_access_log_prefix.", mockMethodDescriptor()); } }; @@ -366,36 +361,31 @@ TEST_F(GrpcAccessLoggerCacheTest, Deduplication) { config.mutable_grpc_service()->mutable_envoy_grpc()->set_cluster_name("cluster-1"); expectClientCreation(); - MockGrpcAccessLoggerImpl::SharedPtr logger1 = logger_cache_.getOrCreateLogger( - config, envoy::config::core::v3::ApiVersion::V3, Common::GrpcAccessLoggerType::HTTP, scope); + MockGrpcAccessLoggerImpl::SharedPtr logger1 = + logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::HTTP, scope); EXPECT_EQ(logger1, - logger_cache_.getOrCreateLogger(config, envoy::config::core::v3::ApiVersion::V3, - 
Common::GrpcAccessLoggerType::HTTP, scope)); + logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::HTTP, scope)); // Do not deduplicate different types of logger expectClientCreation(); EXPECT_NE(logger1, - logger_cache_.getOrCreateLogger(config, envoy::config::core::v3::ApiVersion::V3, - Common::GrpcAccessLoggerType::TCP, scope)); + logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::TCP, scope)); // Changing log name leads to another logger. config.set_log_name("log-2"); expectClientCreation(); EXPECT_NE(logger1, - logger_cache_.getOrCreateLogger(config, envoy::config::core::v3::ApiVersion::V3, - Common::GrpcAccessLoggerType::HTTP, scope)); + logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::HTTP, scope)); config.set_log_name("log-1"); EXPECT_EQ(logger1, - logger_cache_.getOrCreateLogger(config, envoy::config::core::v3::ApiVersion::V3, - Common::GrpcAccessLoggerType::HTTP, scope)); + logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::HTTP, scope)); // Changing cluster name leads to another logger. 
config.mutable_grpc_service()->mutable_envoy_grpc()->set_cluster_name("cluster-2"); expectClientCreation(); EXPECT_NE(logger1, - logger_cache_.getOrCreateLogger(config, envoy::config::core::v3::ApiVersion::V3, - Common::GrpcAccessLoggerType::HTTP, scope)); + logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::HTTP, scope)); } } // namespace diff --git a/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc b/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc index eec260b9c347b..3e5e4f58f9008 100644 --- a/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc +++ b/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc @@ -72,7 +72,7 @@ class GrpcAccessLoggerImplTest : public testing::Test { EXPECT_CALL(*timer_, enableTimer(_, _)); logger_ = std::make_unique( Grpc::RawAsyncClientPtr{async_client_}, "test_log_name", FlushInterval, BUFFER_SIZE_BYTES, - dispatcher_, local_info_, stats_store_, envoy::config::core::v3::ApiVersion::AUTO); + dispatcher_, local_info_, stats_store_); } Grpc::MockAsyncClient* async_client_; @@ -155,8 +155,8 @@ TEST_F(GrpcAccessLoggerCacheImplTest, LoggerCreation) { // Force a flush for every log entry. config.mutable_buffer_size_bytes()->set_value(BUFFER_SIZE_BYTES); - GrpcAccessLoggerSharedPtr logger = logger_cache_.getOrCreateLogger( - config, envoy::config::core::v3::ApiVersion::V3, Common::GrpcAccessLoggerType::HTTP, scope_); + GrpcAccessLoggerSharedPtr logger = + logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::HTTP, scope_); // Note that the local info node() method is mocked, so the node is not really configurable. 
grpc_access_logger_impl_test_helper_.expectStreamMessage(R"EOF( identifier: diff --git a/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc b/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc index 54e012875513c..8c78f302f5612 100644 --- a/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc +++ b/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc @@ -45,8 +45,7 @@ class MockGrpcAccessLoggerCache : public GrpcCommon::GrpcAccessLoggerCache { // GrpcAccessLoggerCache MOCK_METHOD(GrpcCommon::GrpcAccessLoggerSharedPtr, getOrCreateLogger, (const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, - envoy::config::core::v3::ApiVersion, Common::GrpcAccessLoggerType logger_type, - Stats::Scope& scope)); + Common::GrpcAccessLoggerType logger_type, Stats::Scope& scope)); }; class HttpGrpcAccessLogTest : public testing::Test { @@ -59,12 +58,11 @@ class HttpGrpcAccessLogTest : public testing::Test { config_.mutable_common_config()->add_filter_state_objects_to_log("serialized"); config_.mutable_common_config()->set_transport_api_version( envoy::config::core::v3::ApiVersion::V3); - EXPECT_CALL(*logger_cache_, getOrCreateLogger(_, _, _, _)) + EXPECT_CALL(*logger_cache_, getOrCreateLogger(_, _, _)) .WillOnce( [this](const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, - envoy::config::core::v3::ApiVersion, Common::GrpcAccessLoggerType logger_type, - Stats::Scope&) { + Common::GrpcAccessLoggerType logger_type, Stats::Scope&) { EXPECT_EQ(config.DebugString(), config_.common_config().DebugString()); EXPECT_EQ(Common::GrpcAccessLoggerType::HTTP, logger_type); return logger_; @@ -151,7 +149,7 @@ TEST_F(HttpGrpcAccessLogTest, Marshalling) { stream_info.start_time_ = SystemTime(1h); stream_info.start_time_monotonic_ = MonotonicTime(1h); stream_info.last_downstream_tx_byte_sent_ = 2ms; - stream_info.downstream_address_provider_->setLocalAddress( + 
stream_info.downstream_connection_info_provider_->setLocalAddress( std::make_shared("/foo")); (*stream_info.metadata_.mutable_filter_metadata())["foo"] = ProtobufWkt::Struct(); stream_info.filter_state_->setData("string_accessor", @@ -388,8 +386,8 @@ response: {} const std::string tlsVersion = "TLSv1.3"; ON_CALL(*connection_info, tlsVersion()).WillByDefault(ReturnRef(tlsVersion)); ON_CALL(*connection_info, ciphersuiteId()).WillByDefault(Return(0x2CC0)); - stream_info.setDownstreamSslConnection(connection_info); - stream_info.downstream_address_provider_->setRequestedServerName("sni"); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); + stream_info.downstream_connection_info_provider_->setRequestedServerName("sni"); Http::TestRequestHeaderMapImpl request_headers{ {":method", "WHACKADOO"}, @@ -448,8 +446,8 @@ response: {} const std::string tlsVersion = "TLSv1.2"; ON_CALL(*connection_info, tlsVersion()).WillByDefault(ReturnRef(tlsVersion)); ON_CALL(*connection_info, ciphersuiteId()).WillByDefault(Return(0x2F)); - stream_info.setDownstreamSslConnection(connection_info); - stream_info.downstream_address_provider_->setRequestedServerName("sni"); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); + stream_info.downstream_connection_info_provider_->setRequestedServerName("sni"); Http::TestRequestHeaderMapImpl request_headers{ {":method", "WHACKADOO"}, @@ -498,8 +496,8 @@ response: {} const std::string tlsVersion = "TLSv1.1"; ON_CALL(*connection_info, tlsVersion()).WillByDefault(ReturnRef(tlsVersion)); ON_CALL(*connection_info, ciphersuiteId()).WillByDefault(Return(0x2F)); - stream_info.setDownstreamSslConnection(connection_info); - stream_info.downstream_address_provider_->setRequestedServerName("sni"); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); + stream_info.downstream_connection_info_provider_->setRequestedServerName("sni"); Http::TestRequestHeaderMapImpl 
request_headers{ {":method", "WHACKADOO"}, @@ -548,8 +546,8 @@ response: {} const std::string tlsVersion = "TLSv1"; ON_CALL(*connection_info, tlsVersion()).WillByDefault(ReturnRef(tlsVersion)); ON_CALL(*connection_info, ciphersuiteId()).WillByDefault(Return(0x2F)); - stream_info.setDownstreamSslConnection(connection_info); - stream_info.downstream_address_provider_->setRequestedServerName("sni"); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); + stream_info.downstream_connection_info_provider_->setRequestedServerName("sni"); Http::TestRequestHeaderMapImpl request_headers{ {":method", "WHACKADOO"}, @@ -598,8 +596,8 @@ response: {} const std::string tlsVersion = "TLSv1.4"; ON_CALL(*connection_info, tlsVersion()).WillByDefault(ReturnRef(tlsVersion)); ON_CALL(*connection_info, ciphersuiteId()).WillByDefault(Return(0x2F)); - stream_info.setDownstreamSslConnection(connection_info); - stream_info.downstream_address_provider_->setRequestedServerName("sni"); + stream_info.downstream_connection_info_provider_->setSslConnection(connection_info); + stream_info.downstream_connection_info_provider_->setRequestedServerName("sni"); Http::TestRequestHeaderMapImpl request_headers{ {":method", "WHACKADOO"}, diff --git a/test/extensions/access_loggers/grpc/http_grpc_access_log_integration_test.cc b/test/extensions/access_loggers/grpc/http_grpc_access_log_integration_test.cc index 0699fc5e81f9d..75b064169115b 100644 --- a/test/extensions/access_loggers/grpc/http_grpc_access_log_integration_test.cc +++ b/test/extensions/access_loggers/grpc/http_grpc_access_log_integration_test.cc @@ -19,7 +19,7 @@ using testing::AssertionResult; namespace Envoy { namespace { -class AccessLogIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, +class AccessLogIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, public HttpIntegrationTest { public: AccessLogIntegrationTest() : HttpIntegrationTest(Http::CodecType::HTTP1, ipVersion()) {} @@ 
-30,9 +30,6 @@ class AccessLogIntegrationTest : public Grpc::VersionedGrpcClientIntegrationPara } void initialize() override { - if (apiVersion() != envoy::config::core::v3::ApiVersion::V3) { - config_helper_.enableDeprecatedV2Api(); - } config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { auto* accesslog_cluster = bootstrap.mutable_static_resources()->add_clusters(); accesslog_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]); @@ -50,7 +47,7 @@ class AccessLogIntegrationTest : public Grpc::VersionedGrpcClientIntegrationPara envoy::extensions::access_loggers::grpc::v3::HttpGrpcAccessLogConfig config; auto* common_config = config.mutable_common_config(); common_config->set_log_name("foo"); - common_config->set_transport_api_version(apiVersion()); + common_config->set_transport_api_version(envoy::config::core::v3::ApiVersion::V3); setGrpcService(*common_config->mutable_grpc_service(), "accesslog", fake_upstreams_.back()->localAddress()); access_log->mutable_typed_config()->PackFrom(config); @@ -59,14 +56,6 @@ class AccessLogIntegrationTest : public Grpc::VersionedGrpcClientIntegrationPara HttpIntegrationTest::initialize(); } - static ProtobufTypes::MessagePtr scrubHiddenEnvoyDeprecated(const Protobuf::Message& message) { - ProtobufTypes::MessagePtr mutable_clone; - mutable_clone.reset(message.New()); - mutable_clone->MergeFrom(message); - Config::VersionUtil::scrubHiddenEnvoyDeprecated(*mutable_clone); - return mutable_clone; - } - ABSL_MUST_USE_RESULT AssertionResult waitForAccessLogConnection() { return fake_upstreams_[1]->waitForHttpConnection(*dispatcher_, fake_access_log_connection_); @@ -82,8 +71,7 @@ class AccessLogIntegrationTest : public Grpc::VersionedGrpcClientIntegrationPara envoy::service::accesslog::v3::StreamAccessLogsMessage request_msg; VERIFY_ASSERTION(access_log_request_->waitForGrpcMessage(*dispatcher_, request_msg)); EXPECT_EQ("POST", access_log_request_->headers().getMethodValue()); - 
EXPECT_EQ(TestUtility::getVersionedMethodPath("envoy.service.accesslog.{}.AccessLogService", - "StreamAccessLogs", apiVersion()), + EXPECT_EQ("/envoy.service.accesslog.v3.AccessLogService/StreamAccessLogs", access_log_request_->headers().getPathValue()); EXPECT_EQ("application/grpc", access_log_request_->headers().getContentTypeValue()); @@ -105,8 +93,6 @@ class AccessLogIntegrationTest : public Grpc::VersionedGrpcClientIntegrationPara node->clear_extensions(); node->clear_user_agent_build_version(); } - Config::VersionUtil::scrubHiddenEnvoyDeprecated(request_msg); - Config::VersionUtil::scrubHiddenEnvoyDeprecated(expected_request_msg); EXPECT_THAT(request_msg, ProtoEq(expected_request_msg)); return AssertionSuccess(); } @@ -125,12 +111,11 @@ class AccessLogIntegrationTest : public Grpc::VersionedGrpcClientIntegrationPara }; INSTANTIATE_TEST_SUITE_P(IpVersionsCientType, AccessLogIntegrationTest, - VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS, - Grpc::VersionedGrpcClientIntegrationParamTest::protocolTestParamsToString); + GRPC_CLIENT_INTEGRATION_PARAMS, + Grpc::GrpcClientIntegrationParamTest::protocolTestParamsToString); // Test a basic full access logging flow. 
TEST_P(AccessLogIntegrationTest, BasicAccessLogFlow) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; testRouterNotFound(); ASSERT_TRUE(waitForAccessLogConnection()); ASSERT_TRUE(waitForAccessLogStream()); diff --git a/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc b/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc index 2bec4fd81fcce..63ddc3f709370 100644 --- a/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc +++ b/test/extensions/access_loggers/grpc/tcp_grpc_access_log_integration_test.cc @@ -24,7 +24,7 @@ void clearPort(envoy::config::core::v3::Address& address) { address.mutable_socket_address()->clear_port_specifier(); } -class TcpGrpcAccessLogIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, +class TcpGrpcAccessLogIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, public BaseIntegrationTest { public: TcpGrpcAccessLogIntegrationTest() @@ -38,9 +38,6 @@ class TcpGrpcAccessLogIntegrationTest : public Grpc::VersionedGrpcClientIntegrat } void initialize() override { - if (apiVersion() != envoy::config::core::v3::ApiVersion::V3) { - config_helper_.enableDeprecatedV2Api(); - } config_helper_.renameListener("tcp_proxy"); config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { auto* accesslog_cluster = bootstrap.mutable_static_resources()->add_clusters(); @@ -56,7 +53,7 @@ class TcpGrpcAccessLogIntegrationTest : public Grpc::VersionedGrpcClientIntegrat envoy::extensions::access_loggers::grpc::v3::TcpGrpcAccessLogConfig access_log_config; auto* common_config = access_log_config.mutable_common_config(); common_config->set_log_name("foo"); - common_config->set_transport_api_version(apiVersion()); + common_config->set_transport_api_version(envoy::config::core::v3::ApiVersion::V3); setGrpcService(*common_config->mutable_grpc_service(), "accesslog", fake_upstreams_.back()->localAddress()); 
access_log->mutable_typed_config()->PackFrom(access_log_config); @@ -79,8 +76,7 @@ class TcpGrpcAccessLogIntegrationTest : public Grpc::VersionedGrpcClientIntegrat envoy::service::accesslog::v3::StreamAccessLogsMessage request_msg; VERIFY_ASSERTION(access_log_request_->waitForGrpcMessage(*dispatcher_, request_msg)); EXPECT_EQ("POST", access_log_request_->headers().getMethodValue()); - EXPECT_EQ(TestUtility::getVersionedMethodPath("envoy.service.accesslog.{}.AccessLogService", - "StreamAccessLogs", apiVersion()), + EXPECT_EQ("/envoy.service.accesslog.v3.AccessLogService/StreamAccessLogs", access_log_request_->headers().getPathValue()); EXPECT_EQ("application/grpc", access_log_request_->headers().getContentTypeValue()); @@ -103,8 +99,6 @@ class TcpGrpcAccessLogIntegrationTest : public Grpc::VersionedGrpcClientIntegrat node->clear_extensions(); node->clear_user_agent_build_version(); } - Config::VersionUtil::scrubHiddenEnvoyDeprecated(request_msg); - Config::VersionUtil::scrubHiddenEnvoyDeprecated(expected_request_msg); EXPECT_TRUE(TestUtility::protoEqual(request_msg, expected_request_msg, /*ignore_repeated_field_ordering=*/false)); @@ -126,12 +120,11 @@ class TcpGrpcAccessLogIntegrationTest : public Grpc::VersionedGrpcClientIntegrat }; INSTANTIATE_TEST_SUITE_P(IpVersionsCientType, TcpGrpcAccessLogIntegrationTest, - VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS, - Grpc::VersionedGrpcClientIntegrationParamTest::protocolTestParamsToString); + GRPC_CLIENT_INTEGRATION_PARAMS, + Grpc::GrpcClientIntegrationParamTest::protocolTestParamsToString); // Test a basic full access logging flow. 
TEST_P(TcpGrpcAccessLogIntegrationTest, BasicAccessLogFlow) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; initialize(); IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); diff --git a/test/extensions/access_loggers/open_telemetry/access_log_impl_test.cc b/test/extensions/access_loggers/open_telemetry/access_log_impl_test.cc index d169f040f3b9f..f815e3a3bd981 100644 --- a/test/extensions/access_loggers/open_telemetry/access_log_impl_test.cc +++ b/test/extensions/access_loggers/open_telemetry/access_log_impl_test.cc @@ -52,8 +52,7 @@ class MockGrpcAccessLoggerCache : public GrpcAccessLoggerCache { // GrpcAccessLoggerCache MOCK_METHOD(GrpcAccessLoggerSharedPtr, getOrCreateLogger, (const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, - envoy::config::core::v3::ApiVersion, Common::GrpcAccessLoggerType logger_type, - Stats::Scope& scope)); + Common::GrpcAccessLoggerType logger_type, Stats::Scope& scope)); }; class AccessLogTest : public testing::Test { @@ -83,12 +82,11 @@ string_value: "x-request-header: %REQ(x-request-header)%, protocol: %PROTOCOL%" config_.mutable_common_config()->set_log_name("test_log"); config_.mutable_common_config()->set_transport_api_version( envoy::config::core::v3::ApiVersion::V3); - EXPECT_CALL(*logger_cache_, getOrCreateLogger(_, _, _, _)) + EXPECT_CALL(*logger_cache_, getOrCreateLogger(_, _, _)) .WillOnce( [this](const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, - envoy::config::core::v3::ApiVersion, Common::GrpcAccessLoggerType logger_type, - Stats::Scope&) { + Common::GrpcAccessLoggerType logger_type, Stats::Scope&) { EXPECT_EQ(config.DebugString(), config_.common_config().DebugString()); EXPECT_EQ(Common::GrpcAccessLoggerType::HTTP, logger_type); return logger_; diff --git a/test/extensions/access_loggers/open_telemetry/access_log_integration_test.cc b/test/extensions/access_loggers/open_telemetry/access_log_integration_test.cc index 
66c62fccf311f..ce52155a6dc96 100644 --- a/test/extensions/access_loggers/open_telemetry/access_log_integration_test.cc +++ b/test/extensions/access_loggers/open_telemetry/access_log_integration_test.cc @@ -46,7 +46,7 @@ constexpr char EXPECTED_REQUEST_MESSAGE[] = R"EOF( namespace Envoy { namespace { -class AccessLogIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, +class AccessLogIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, public HttpIntegrationTest { public: AccessLogIntegrationTest() : HttpIntegrationTest(Http::CodecType::HTTP1, ipVersion()) {} @@ -57,9 +57,6 @@ class AccessLogIntegrationTest : public Grpc::VersionedGrpcClientIntegrationPara } void initialize() override { - if (apiVersion() != envoy::config::core::v3::ApiVersion::V3) { - config_helper_.enableDeprecatedV2Api(); - } config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { auto* accesslog_cluster = bootstrap.mutable_static_resources()->add_clusters(); accesslog_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]); @@ -78,7 +75,7 @@ class AccessLogIntegrationTest : public Grpc::VersionedGrpcClientIntegrationPara config; auto* common_config = config.mutable_common_config(); common_config->set_log_name("foo"); - common_config->set_transport_api_version(apiVersion()); + common_config->set_transport_api_version(envoy::config::core::v3::ApiVersion::V3); setGrpcService(*common_config->mutable_grpc_service(), "accesslog", fake_upstreams_.back()->localAddress()); auto* body_config = config.mutable_body(); @@ -93,14 +90,6 @@ class AccessLogIntegrationTest : public Grpc::VersionedGrpcClientIntegrationPara HttpIntegrationTest::initialize(); } - static ProtobufTypes::MessagePtr scrubHiddenEnvoyDeprecated(const Protobuf::Message& message) { - ProtobufTypes::MessagePtr mutable_clone; - mutable_clone.reset(message.New()); - mutable_clone->MergeFrom(message); - Config::VersionUtil::scrubHiddenEnvoyDeprecated(*mutable_clone); - return 
mutable_clone; - } - ABSL_MUST_USE_RESULT AssertionResult waitForAccessLogConnection() { return fake_upstreams_[1]->waitForHttpConnection(*dispatcher_, fake_access_log_connection_); @@ -128,8 +117,6 @@ class AccessLogIntegrationTest : public Grpc::VersionedGrpcClientIntegrationPara ->mutable_logs(0) ->clear_time_unix_nano(); - Config::VersionUtil::scrubHiddenEnvoyDeprecated(request_msg); - Config::VersionUtil::scrubHiddenEnvoyDeprecated(expected_request_msg); EXPECT_TRUE(TestUtility::protoEqual(request_msg, expected_request_msg, /*ignore_repeated_field_ordering=*/false)); return AssertionSuccess(); @@ -149,12 +136,11 @@ class AccessLogIntegrationTest : public Grpc::VersionedGrpcClientIntegrationPara }; INSTANTIATE_TEST_SUITE_P(IpVersionsCientType, AccessLogIntegrationTest, - VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS, - Grpc::VersionedGrpcClientIntegrationParamTest::protocolTestParamsToString); + GRPC_CLIENT_INTEGRATION_PARAMS, + Grpc::GrpcClientIntegrationParamTest::protocolTestParamsToString); // Test a basic full access logging flow. 
TEST_P(AccessLogIntegrationTest, BasicAccessLogFlow) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; testRouterNotFound(); ASSERT_TRUE(waitForAccessLogConnection()); ASSERT_TRUE(waitForAccessLogStream()); diff --git a/test/extensions/access_loggers/open_telemetry/grpc_access_log_impl_test.cc b/test/extensions/access_loggers/open_telemetry/grpc_access_log_impl_test.cc index ecd8d6b96a83e..850ae1dfa4cdf 100644 --- a/test/extensions/access_loggers/open_telemetry/grpc_access_log_impl_test.cc +++ b/test/extensions/access_loggers/open_telemetry/grpc_access_log_impl_test.cc @@ -81,7 +81,7 @@ class GrpcAccessLoggerImplTest : public testing::Test { EXPECT_CALL(*timer_, enableTimer(_, _)); logger_ = std::make_unique( Grpc::RawAsyncClientPtr{async_client_}, "test_log_name", FlushInterval, BUFFER_SIZE_BYTES, - dispatcher_, local_info_, stats_store_, envoy::config::core::v3::ApiVersion::V3); + dispatcher_, local_info_, stats_store_); } Grpc::MockAsyncClient* async_client_; @@ -178,8 +178,8 @@ TEST_F(GrpcAccessLoggerCacheImplTest, LoggerCreation) { // Force a flush for every log entry. 
config.mutable_buffer_size_bytes()->set_value(BUFFER_SIZE_BYTES); - GrpcAccessLoggerSharedPtr logger = logger_cache_.getOrCreateLogger( - config, envoy::config::core::v3::ApiVersion::V3, Common::GrpcAccessLoggerType::HTTP, scope_); + GrpcAccessLoggerSharedPtr logger = + logger_cache_.getOrCreateLogger(config, Common::GrpcAccessLoggerType::HTTP, scope_); grpc_access_logger_impl_test_helper_.expectStreamMessage(R"EOF( resource_logs: resource: diff --git a/test/extensions/clusters/aggregate/BUILD b/test/extensions/clusters/aggregate/BUILD index 4a9fd793aaa9b..03a8652831fe2 100644 --- a/test/extensions/clusters/aggregate/BUILD +++ b/test/extensions/clusters/aggregate/BUILD @@ -66,6 +66,7 @@ envoy_extension_cc_test( "//source/extensions/filters/network/tcp_proxy:config", "//source/extensions/retry/priority/previous_priorities:config", "//test/common/grpc:grpc_client_integration_lib", + "//test/config:v2_link_hacks", "//test/integration:http_integration_lib", "//test/integration:integration_lib", "//test/mocks/runtime:runtime_mocks", diff --git a/test/extensions/clusters/aggregate/cluster_integration_test.cc b/test/extensions/clusters/aggregate/cluster_integration_test.cc index e7dbaecfbbccb..aad8f587b5a32 100644 --- a/test/extensions/clusters/aggregate/cluster_integration_test.cc +++ b/test/extensions/clusters/aggregate/cluster_integration_test.cc @@ -7,6 +7,7 @@ #include "source/common/protobuf/utility.h" #include "test/common/grpc/grpc_client_integration.h" +#include "test/config/v2_link_hacks.h" #include "test/integration/http_integration.h" #include "test/integration/utility.h" #include "test/test_common/network_utility.h" diff --git a/test/extensions/clusters/aggregate/cluster_test.cc b/test/extensions/clusters/aggregate/cluster_test.cc index 4842befab521f..12d7ab43ae102 100644 --- a/test/extensions/clusters/aggregate/cluster_test.cc +++ b/test/extensions/clusters/aggregate/cluster_test.cc @@ -95,7 +95,6 @@ class AggregateClusterTest : public 
Event::TestUsingSimulatedTime, public testin Upstream::parseClusterFromV3Yaml(yaml_config); envoy::extensions::clusters::aggregate::v3::ClusterConfig config; Config::Utility::translateOpaqueConfig(cluster_config.cluster_type().typed_config(), - ProtobufWkt::Struct::default_instance(), ProtobufMessage::getStrictValidationVisitor(), config); Stats::ScopePtr scope = stats_store_.createScope("cluster.name."); Server::Configuration::TransportSocketFactoryContextImpl factory_context( diff --git a/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc b/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc index 595be9f596d86..097dcf993cd4d 100644 --- a/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc +++ b/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc @@ -37,7 +37,6 @@ class ClusterTest : public testing::Test, Upstream::parseClusterFromV3Yaml(yaml_config); envoy::extensions::clusters::dynamic_forward_proxy::v3::ClusterConfig config; Config::Utility::translateOpaqueConfig(cluster_config.cluster_type().typed_config(), - ProtobufWkt::Struct::default_instance(), ProtobufMessage::getStrictValidationVisitor(), config); Stats::ScopePtr scope = stats_store_.createScope("cluster.name."); Server::Configuration::TransportSocketFactoryContextImpl factory_context( @@ -203,9 +202,9 @@ TEST_F(ClusterTest, PopulatedCache) { class ClusterFactoryTest : public testing::Test { protected: - void createCluster(const std::string& yaml_config, bool avoid_boosting = true) { + void createCluster(const std::string& yaml_config) { envoy::config::cluster::v3::Cluster cluster_config = - Upstream::parseClusterFromV3Yaml(yaml_config, avoid_boosting); + Upstream::parseClusterFromV3Yaml(yaml_config); Upstream::ClusterFactoryContextImpl cluster_factory_context( cm_, stats_store_, tls_, nullptr, ssl_context_manager_, runtime_, dispatcher_, log_manager_, local_info_, admin_, singleton_manager_, nullptr, true, validation_visitor_, *api_, @@ -249,8 +248,8 @@ 
upstream_http_protocol_options: {} EXPECT_THROW_WITH_MESSAGE( createCluster(yaml_config), EnvoyException, - "dynamic_forward_proxy cluster must have auto_sni and auto_san_validation true when " - "configured with upstream_http_protocol_options"); + "dynamic_forward_proxy cluster must have auto_sni and auto_san_validation true unless " + "allow_insecure_cluster_options is set."); } TEST_F(ClusterFactoryTest, InsecureUpstreamHttpProtocolOptions) { diff --git a/test/extensions/clusters/redis/mocks.h b/test/extensions/clusters/redis/mocks.h index bd2409e241460..1f1531f45be6c 100644 --- a/test/extensions/clusters/redis/mocks.h +++ b/test/extensions/clusters/redis/mocks.h @@ -21,7 +21,7 @@ class MockClusterSlotUpdateCallBack : public ClusterSlotUpdateCallBack { MockClusterSlotUpdateCallBack(); ~MockClusterSlotUpdateCallBack() override = default; - MOCK_METHOD(bool, onClusterSlotUpdate, (ClusterSlotsPtr&&, Upstream::HostMap)); + MOCK_METHOD(bool, onClusterSlotUpdate, (ClusterSlotsPtr&&, Upstream::HostMap&)); MOCK_METHOD(void, onHostHealthUpdate, ()); }; diff --git a/test/extensions/clusters/redis/redis_cluster_test.cc b/test/extensions/clusters/redis/redis_cluster_test.cc index 526c10a3039f2..8c1fc3cc9f352 100644 --- a/test/extensions/clusters/redis/redis_cluster_test.cc +++ b/test/extensions/clusters/redis/redis_cluster_test.cc @@ -95,11 +95,10 @@ class RedisClusterTest : public testing::Test, return addresses; } - void setupFromV3Yaml(const std::string& yaml, bool avoid_boosting = true) { + void setupFromV3Yaml(const std::string& yaml) { expectRedisSessionCreated(); NiceMock cm; - envoy::config::cluster::v3::Cluster cluster_config = - Upstream::parseClusterFromV3Yaml(yaml, avoid_boosting); + envoy::config::cluster::v3::Cluster cluster_config = Upstream::parseClusterFromV3Yaml(yaml); Envoy::Stats::ScopePtr scope = stats_store_.createScope(fmt::format( "cluster.{}.", cluster_config.alt_stat_name().empty() ? 
cluster_config.name() : cluster_config.alt_stat_name())); @@ -109,7 +108,6 @@ class RedisClusterTest : public testing::Test, envoy::extensions::clusters::redis::v3::RedisClusterConfig config; Config::Utility::translateOpaqueConfig(cluster_config.cluster_type().typed_config(), - ProtobufWkt::Struct::default_instance(), ProtobufMessage::getStrictValidationVisitor(), config); cluster_callback_ = std::make_shared>(); cluster_ = std::make_shared( @@ -139,7 +137,6 @@ class RedisClusterTest : public testing::Test, envoy::extensions::clusters::redis::v3::RedisClusterConfig config; Config::Utility::translateOpaqueConfig(cluster_config.cluster_type().typed_config(), - ProtobufWkt::Struct::default_instance(), validation_visitor_, config); NiceMock log_manager; diff --git a/test/extensions/common/aws/utility_test.cc b/test/extensions/common/aws/utility_test.cc index 1f115d9baff27..d19f8e220356a 100644 --- a/test/extensions/common/aws/utility_test.cc +++ b/test/extensions/common/aws/utility_test.cc @@ -100,7 +100,8 @@ TEST(UtilityTest, CanonicalizeHeadersDropMutatingHeaders) { // Verify the format of a minimalist canonical request TEST(UtilityTest, MinimalCanonicalRequest) { std::map headers; - const auto request = Utility::createCanonicalRequest("GET", "", headers, "content-hash"); + const auto request = + Utility::createCanonicalRequest("appmesh", "GET", "", headers, "content-hash"); EXPECT_EQ(R"(GET / @@ -112,10 +113,11 @@ content-hash)", TEST(UtilityTest, CanonicalRequestWithQueryString) { const std::map headers; - const auto request = Utility::createCanonicalRequest("GET", "?query", headers, "content-hash"); + const auto request = + Utility::createCanonicalRequest("appmesh", "GET", "?query", headers, "content-hash"); EXPECT_EQ(R"(GET / -query +query= content-hash)", @@ -128,7 +130,8 @@ TEST(UtilityTest, CanonicalRequestWithHeaders) { {"header2", "value2"}, {"header3", "value3"}, }; - const auto request = Utility::createCanonicalRequest("GET", "", headers, "content-hash"); 
+ const auto request = + Utility::createCanonicalRequest("appmesh", "GET", "", headers, "content-hash"); EXPECT_EQ(R"(GET / @@ -141,6 +144,166 @@ content-hash)", request); } +TEST(UtilityTest, CanonicalizePathStringReturnSlash) { + const absl::string_view path = ""; + const auto canonical_path = Utility::canonicalizePathString(path, "appmesh"); + EXPECT_EQ("/", canonical_path); +} + +TEST(UtilityTest, CanonicalizePathStringSlash) { + const absl::string_view path = "/"; + const auto canonical_path = Utility::canonicalizePathString(path, "appmesh"); + EXPECT_EQ("/", canonical_path); +} + +TEST(UtilityTest, CanonicalizePathStringSlashes) { + const absl::string_view path = "///"; + const auto canonical_path = Utility::canonicalizePathString(path, "appmesh"); + EXPECT_EQ("/", canonical_path); +} + +TEST(UtilityTest, CanonicalizePathStringPrefixSlash) { + const absl::string_view path = "test"; + const auto canonical_path = Utility::canonicalizePathString(path, "appmesh"); + EXPECT_EQ("/test", canonical_path); +} + +TEST(UtilityTest, CanonicalizePathStringSuffixSlash) { + const absl::string_view path = "test/"; + const auto canonical_path = Utility::canonicalizePathString(path, "appmesh"); + EXPECT_EQ("/test/", canonical_path); +} + +TEST(UtilityTest, CanonicalizePathStringNormalizeSlash) { + const absl::string_view path = "test////test///"; + const auto canonical_path = Utility::canonicalizePathString(path, "appmesh"); + EXPECT_EQ("/test/test/", canonical_path); +} + +TEST(UtilityTest, CanonicalizePathStringWithEncoding) { + const absl::string_view path = "test$file.txt"; + const auto canonical_path = Utility::canonicalizePathString(path, "appmesh"); + EXPECT_EQ("/test%24file.txt", canonical_path); +} + +TEST(UtilityTest, CanonicalizePathStringWithEncodingSpaces) { + const absl::string_view path = "/test and test/"; + const auto canonical_path = Utility::canonicalizePathString(path, "appmesh"); + EXPECT_EQ("/test%20and%20test/", canonical_path); +} + +TEST(UtilityTest, 
CanonicalizePathStringWithAlreadyEncodedSpaces) { + const absl::string_view path = "/test%20and%20test/"; + const auto canonical_path = Utility::canonicalizePathString(path, "appmesh"); + EXPECT_EQ("/test%2520and%2520test/", canonical_path); +} + +TEST(UtilityTest, CanonicalizeS3PathStringDoNotNormalizeSlash) { + const absl::string_view path = "/test//test///"; + const auto canonical_path = Utility::canonicalizePathString(path, "s3"); + EXPECT_EQ("/test//test///", canonical_path); +} + +TEST(UtilityTest, CanonicalizeS3PathStringSlashes) { + const absl::string_view path = "///"; + const auto canonical_path = Utility::canonicalizePathString(path, "s3"); + EXPECT_EQ("///", canonical_path); +} + +TEST(UtilityTest, CanonicalizeS3PathStringWithEncoding) { + const absl::string_view path = "/test$file.txt"; + const auto canonical_path = Utility::canonicalizePathString(path, "s3"); + EXPECT_EQ("/test%24file.txt", canonical_path); +} + +TEST(UtilityTest, CanonicalizeS3PathStringWithEncodingSpaces) { + const absl::string_view path = "/test and test/"; + const auto canonical_path = Utility::canonicalizePathString(path, "s3"); + EXPECT_EQ("/test%20and%20test/", canonical_path); +} + +TEST(UtilityTest, EncodePathSegment) { + const absl::string_view path = "test^!@=-_~."; + const auto encoded_path = Utility::encodePathSegment(path, "appmesh"); + EXPECT_EQ("test%5E%21%40%3D-_~.", encoded_path); +} + +TEST(UtilityTest, EncodeS3PathSegment) { + const absl::string_view path = "/test/^!@=/-_~."; + const auto encoded_path = Utility::encodePathSegment(path, "s3"); + EXPECT_EQ("/test/%5E%21%40%3D/-_~.", encoded_path); +} + +TEST(UtilityTest, CanonicalizeQueryString) { + const absl::string_view query = "a=1&b=2"; + const auto canonical_query = Utility::canonicalizeQueryString(query); + EXPECT_EQ("a=1&b=2", canonical_query); +} + +TEST(UtilityTest, CanonicalizeQueryStringTrailingEquals) { + const absl::string_view query = "a&b"; + const auto canonical_query = 
Utility::canonicalizeQueryString(query); + EXPECT_EQ("a=&b=", canonical_query); +} + +TEST(UtilityTest, CanonicalizeQueryStringSorted) { + const absl::string_view query = "a=3&b=1&a=2&a=1"; + const auto canonical_query = Utility::canonicalizeQueryString(query); + EXPECT_EQ("a=1&a=2&a=3&b=1", canonical_query); +} + +TEST(UtilityTest, CanonicalizeQueryStringEncoded) { + const absl::string_view query = "a=^!@&b=/-_~."; + const auto canonical_query = Utility::canonicalizeQueryString(query); + EXPECT_EQ("a=%5E%21%40&b=%2F-_~.", canonical_query); +} + +TEST(UtilityTest, CanonicalizeQueryStringWithPlus) { + const absl::string_view query = "a=1+2"; + const auto canonical_query = Utility::canonicalizeQueryString(query); + EXPECT_EQ("a=1%202", canonical_query); +} + +TEST(UtilityTest, CanonicalizeQueryStringDoubleEncodeEquals) { + const absl::string_view query = "a=!.!=!"; + const auto canonical_query = Utility::canonicalizeQueryString(query); + EXPECT_EQ("a=%21.%21%253D%21", canonical_query); +} + +TEST(UtilityTest, EncodeQuerySegment) { + const absl::string_view query = "^!@/-_~."; + const auto encoded_query = Utility::encodeQueryParam(query); + EXPECT_EQ("%5E%21%40%2F-_~.", encoded_query); +} + +TEST(UtilityTest, EncodeQuerySegmentReserved) { + const absl::string_view query = "?=&"; + const auto encoded_query = Utility::encodeQueryParam(query); + EXPECT_EQ("%3F%253D%26", encoded_query); +} + +TEST(UtilityTest, CanonicalizationFuzzTest) { + std::string fuzz; + fuzz.reserve(3); + // Printable ASCII 32 - 126 + for (unsigned char i = 32; i <= 126; i++) { + fuzz.push_back(i); + for (unsigned char j = 32; j <= 126; j++) { + fuzz.push_back(j); + for (unsigned char k = 32; k <= 126; k++) { + fuzz.push_back(k); + Utility::encodePathSegment(fuzz, "s3"); + Utility::canonicalizePathString(fuzz, "appmesh"); + Utility::encodeQueryParam(fuzz); + Utility::canonicalizeQueryString(fuzz); + fuzz.pop_back(); + } + fuzz.pop_back(); + } + fuzz.pop_back(); + } +} + // Verify headers are joined 
with ";" TEST(UtilityTest, JoinCanonicalHeaderNames) { std::map headers = { diff --git a/test/extensions/common/dynamic_forward_proxy/BUILD b/test/extensions/common/dynamic_forward_proxy/BUILD index b280556cba0ce..d9101e6f94a5e 100644 --- a/test/extensions/common/dynamic_forward_proxy/BUILD +++ b/test/extensions/common/dynamic_forward_proxy/BUILD @@ -20,11 +20,13 @@ envoy_cc_test( "//test/mocks/network:network_mocks", "//test/mocks/runtime:runtime_mocks", "//test/mocks/thread_local:thread_local_mocks", + "//test/test_common:registry_lib", "//test/test_common:simulated_time_system_lib", "//test/test_common:test_runtime_lib", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/common/dynamic_forward_proxy/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/key_value/file_based/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc b/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc index 397b87bc1adb2..5b823d55118f2 100644 --- a/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc +++ b/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc @@ -1,6 +1,7 @@ #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/config/core/v3/resolver.pb.h" #include "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.pb.h" +#include "envoy/extensions/key_value/file_based/v3/config.pb.validate.h" #include "source/common/config/utility.h" #include "source/common/network/resolver_impl.h" @@ -8,9 +9,12 @@ #include "source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h" #include "test/extensions/common/dynamic_forward_proxy/mocks.h" +#include "test/mocks/filesystem/mocks.h" #include "test/mocks/network/mocks.h" +#include "test/mocks/protobuf/mocks.h" #include "test/mocks/runtime/mocks.h" #include "test/mocks/thread_local/mocks.h" +#include "test/test_common/registry.h" #include 
"test/test_common/simulated_time_system.h" #include "test/test_common/test_runtime.h" #include "test/test_common/utility.h" @@ -43,8 +47,8 @@ class DnsCacheImplTest : public testing::Test, public Event::TestUsingSimulatedT EXPECT_CALL(dispatcher_, isThreadSafe).WillRepeatedly(Return(true)); EXPECT_CALL(dispatcher_, createDnsResolver(_, _)).WillOnce(Return(resolver_)); - dns_cache_ = - std::make_unique(dispatcher_, tls_, random_, loader_, store_, config_); + dns_cache_ = std::make_unique(dispatcher_, tls_, random_, filesystem_, loader_, + store_, validation_visitor_, config_); update_callbacks_handle_ = dns_cache_->addUpdateCallbacks(update_callbacks_); } @@ -73,11 +77,13 @@ class DnsCacheImplTest : public testing::Test, public Event::TestUsingSimulatedT std::shared_ptr resolver_{std::make_shared()}; NiceMock tls_; NiceMock random_; + NiceMock filesystem_; NiceMock loader_; Stats::IsolatedStoreImpl store_; std::unique_ptr dns_cache_; MockUpdateCallbacks update_callbacks_; DnsCache::AddUpdateCallbacksHandlePtr update_callbacks_handle_; + Envoy::ProtobufMessage::MockValidationVisitor validation_visitor_; }; MATCHER_P3(DnsHostInfoEquals, address, resolved_host, is_ip_address, "") { @@ -108,7 +114,8 @@ MATCHER_P(CustomDnsResolversSizeEquals, expected_resolvers, "") { TEST_F(DnsCacheImplTest, PreresolveSuccess) { Network::DnsResolver::ResolveCb resolve_cb; - EXPECT_CALL(*resolver_, resolve("bar.baz.com", _, _)) + std::string hostname = "bar.baz.com"; + EXPECT_CALL(*resolver_, resolve(hostname, _, _)) .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); EXPECT_CALL( update_callbacks_, @@ -842,7 +849,8 @@ TEST_F(DnsCacheImplTest, UseTcpForDnsLookupsOptionSetDeprecatedField) { envoy::config::core::v3::DnsResolverOptions dns_resolver_options; EXPECT_CALL(dispatcher_, createDnsResolver(_, _)) .WillOnce(DoAll(SaveArg<1>(&dns_resolver_options), Return(resolver_))); - DnsCacheImpl dns_cache_(dispatcher_, tls_, random_, loader_, store_, config_); + 
DnsCacheImpl dns_cache_(dispatcher_, tls_, random_, filesystem_, loader_, store_, + validation_visitor_, config_); // `true` here means dns_resolver_options.use_tcp_for_dns_lookups is set to true. EXPECT_EQ(true, dns_resolver_options.use_tcp_for_dns_lookups()); } @@ -855,7 +863,8 @@ TEST_F(DnsCacheImplTest, UseTcpForDnsLookupsOptionSet) { envoy::config::core::v3::DnsResolverOptions dns_resolver_options; EXPECT_CALL(dispatcher_, createDnsResolver(_, _)) .WillOnce(DoAll(SaveArg<1>(&dns_resolver_options), Return(resolver_))); - DnsCacheImpl dns_cache_(dispatcher_, tls_, random_, loader_, store_, config_); + DnsCacheImpl dns_cache_(dispatcher_, tls_, random_, filesystem_, loader_, store_, + validation_visitor_, config_); // `true` here means dns_resolver_options.use_tcp_for_dns_lookups is set to true. EXPECT_EQ(true, dns_resolver_options.use_tcp_for_dns_lookups()); } @@ -868,7 +877,8 @@ TEST_F(DnsCacheImplTest, NoDefaultSearchDomainOptionSet) { envoy::config::core::v3::DnsResolverOptions dns_resolver_options; EXPECT_CALL(dispatcher_, createDnsResolver(_, _)) .WillOnce(DoAll(SaveArg<1>(&dns_resolver_options), Return(resolver_))); - DnsCacheImpl dns_cache_(dispatcher_, tls_, random_, loader_, store_, config_); + DnsCacheImpl dns_cache_(dispatcher_, tls_, random_, filesystem_, loader_, store_, + validation_visitor_, config_); // `true` here means dns_resolver_options.no_default_search_domain is set to true. 
EXPECT_EQ(true, dns_resolver_options.no_default_search_domain()); } @@ -878,7 +888,8 @@ TEST_F(DnsCacheImplTest, UseTcpForDnsLookupsOptionUnSet) { envoy::config::core::v3::DnsResolverOptions dns_resolver_options; EXPECT_CALL(dispatcher_, createDnsResolver(_, _)) .WillOnce(DoAll(SaveArg<1>(&dns_resolver_options), Return(resolver_))); - DnsCacheImpl dns_cache_(dispatcher_, tls_, random_, loader_, store_, config_); + DnsCacheImpl dns_cache_(dispatcher_, tls_, random_, filesystem_, loader_, store_, + validation_visitor_, config_); // `false` here means dns_resolver_options.use_tcp_for_dns_lookups is set to false. EXPECT_EQ(false, dns_resolver_options.use_tcp_for_dns_lookups()); } @@ -888,7 +899,8 @@ TEST_F(DnsCacheImplTest, NoDefaultSearchDomainOptionUnSet) { envoy::config::core::v3::DnsResolverOptions dns_resolver_options; EXPECT_CALL(dispatcher_, createDnsResolver(_, _)) .WillOnce(DoAll(SaveArg<1>(&dns_resolver_options), Return(resolver_))); - DnsCacheImpl dns_cache_(dispatcher_, tls_, random_, loader_, store_, config_); + DnsCacheImpl dns_cache_(dispatcher_, tls_, random_, filesystem_, loader_, store_, + validation_visitor_, config_); // `false` here means dns_resolver_options.no_default_search_domain is set to false. 
EXPECT_EQ(false, dns_resolver_options.no_default_search_domain()); } @@ -900,7 +912,9 @@ TEST(DnsCacheManagerImplTest, LoadViaConfig) { NiceMock random; NiceMock loader; Stats::IsolatedStoreImpl store; - DnsCacheManagerImpl cache_manager(dispatcher, tls, random, loader, store); + NiceMock filesystem; + Envoy::ProtobufMessage::MockValidationVisitor visitor; + DnsCacheManagerImpl cache_manager(dispatcher, tls, random, filesystem, loader, store, visitor); envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config1; config1.set_name("foo"); @@ -937,7 +951,9 @@ TEST(DnsCacheConfigOptionsTest, EmtpyDnsResolutionConfig) { std::vector expected_empty_dns_resolvers; EXPECT_CALL(dispatcher, createDnsResolver(expected_empty_dns_resolvers, _)) .WillOnce(Return(resolver)); - DnsCacheImpl dns_cache_(dispatcher, tls, random, loader, store, config); + NiceMock filesystem; + Envoy::ProtobufMessage::MockValidationVisitor visitor; + DnsCacheImpl dns_cache(dispatcher, tls, random, filesystem, loader, store, visitor, config); } TEST(DnsCacheConfigOptionsTest, NonEmptyDnsResolutionConfig) { @@ -959,7 +975,9 @@ TEST(DnsCacheConfigOptionsTest, NonEmptyDnsResolutionConfig) { EXPECT_CALL(dispatcher, createDnsResolver(CustomDnsResolversSizeEquals(expected_dns_resolvers), _)) .WillOnce(Return(resolver)); - DnsCacheImpl dns_cache_(dispatcher, tls, random, loader, store, config); + NiceMock filesystem; + Envoy::ProtobufMessage::MockValidationVisitor visitor; + DnsCacheImpl dns_cache_(dispatcher, tls, random, filesystem, loader, store, visitor, config); } // Note: this test is done here, rather than a TYPED_TEST_SUITE in @@ -1006,6 +1024,98 @@ TEST(UtilityTest, PrepareDnsRefreshStrategy) { } } +TEST_F(DnsCacheImplTest, ResolveSuccessWithCaching) { + // Configure the cache. 
+ MockKeyValueStoreFactory factory; + EXPECT_CALL(factory, createEmptyConfigProto()).WillRepeatedly(Invoke([]() { + return std::make_unique< + envoy::extensions::key_value::file_based::v3::FileBasedKeyValueStoreConfig>(); + })); + MockKeyValueStore* store{}; + EXPECT_CALL(factory, createStore(_, _, _, _)).WillOnce(Invoke([&store]() { + auto ret = std::make_unique>(); + store = ret.get(); + // Make sure there's an attempt to load from the key value store. + EXPECT_CALL(*store, iterate); + return ret; + })); + + Registry::InjectFactory injector(factory); + config_.mutable_key_value_config()->mutable_config()->set_name("mock_key_value_store_factory"); + + initialize(); + InSequence s; + ASSERT(store != nullptr); + + MockLoadDnsCacheEntryCallbacks callbacks; + Network::DnsResolver::ResolveCb resolve_cb; + Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_); + Event::MockTimer* timeout_timer = new Event::MockTimer(&dispatcher_); + EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(5000), nullptr)); + EXPECT_CALL(*resolver_, resolve("foo.com", _, _)) + .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); + auto result = dns_cache_->loadDnsCacheEntry("foo.com", 80, callbacks); + EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::Loading, result.status_); + EXPECT_NE(result.handle_, nullptr); + EXPECT_EQ(absl::nullopt, result.host_info_); + + checkStats(1 /* attempt */, 0 /* success */, 0 /* failure */, 0 /* address changed */, + 1 /* added */, 0 /* removed */, 1 /* num hosts */); + + EXPECT_CALL(*timeout_timer, disableTimer()); + // Make sure the store gets the first insert. 
+ EXPECT_CALL(*store, addOrUpdate("foo.com", "10.0.0.1:80")); + EXPECT_CALL(update_callbacks_, + onDnsHostAddOrUpdate("foo.com", DnsHostInfoEquals("10.0.0.1:80", "foo.com", false))); + EXPECT_CALL(callbacks, + onLoadDnsCacheComplete(DnsHostInfoEquals("10.0.0.1:80", "foo.com", false))); + EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _)); + resolve_cb(Network::DnsResolver::ResolutionStatus::Success, + TestUtility::makeDnsResponse({"10.0.0.1"})); + + checkStats(1 /* attempt */, 1 /* success */, 0 /* failure */, 1 /* address changed */, + 1 /* added */, 0 /* removed */, 1 /* num hosts */); + + // Re-resolve timer. + EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(5000), nullptr)); + EXPECT_CALL(*resolver_, resolve("foo.com", _, _)) + .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); + resolve_timer->invokeCallback(); + + checkStats(2 /* attempt */, 1 /* success */, 0 /* failure */, 1 /* address changed */, + 1 /* added */, 0 /* removed */, 1 /* num hosts */); + + // Address does not change. + EXPECT_CALL(*timeout_timer, disableTimer()); + EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _)); + resolve_cb(Network::DnsResolver::ResolutionStatus::Success, + TestUtility::makeDnsResponse({"10.0.0.1"})); + + checkStats(2 /* attempt */, 2 /* success */, 0 /* failure */, 1 /* address changed */, + 1 /* added */, 0 /* removed */, 1 /* num hosts */); + + // Re-resolve timer. + EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(5000), nullptr)); + EXPECT_CALL(*resolver_, resolve("foo.com", _, _)) + .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); + resolve_timer->invokeCallback(); + + checkStats(3 /* attempt */, 2 /* success */, 0 /* failure */, 1 /* address changed */, + 1 /* added */, 0 /* removed */, 1 /* num hosts */); + + EXPECT_CALL(*timeout_timer, disableTimer()); + // Make sure the store gets the updated address. 
+ EXPECT_CALL(*store, addOrUpdate("foo.com", "10.0.0.2:80")); + EXPECT_CALL(update_callbacks_, + onDnsHostAddOrUpdate("foo.com", DnsHostInfoEquals("10.0.0.2:80", "foo.com", false))); + EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _)); + resolve_cb(Network::DnsResolver::ResolutionStatus::Success, + TestUtility::makeDnsResponse({"10.0.0.2"})); + + checkStats(3 /* attempt */, 3 /* success */, 0 /* failure */, 2 /* address changed */, + 1 /* added */, 0 /* removed */, 1 /* num hosts */); +} + } // namespace } // namespace DynamicForwardProxy } // namespace Common diff --git a/test/extensions/common/proxy_protocol/proxy_protocol_header_test.cc b/test/extensions/common/proxy_protocol/proxy_protocol_header_test.cc index ca334234a4ef1..af6d7e25f7b00 100644 --- a/test/extensions/common/proxy_protocol/proxy_protocol_header_test.cc +++ b/test/extensions/common/proxy_protocol/proxy_protocol_header_test.cc @@ -31,9 +31,9 @@ TEST(ProxyProtocolHeaderTest, GeneratesV1IPv4Header) { // Make sure the wrapper utility generates the same output. testing::NiceMock connection; - connection.stream_info_.downstream_address_provider_->setRemoteAddress( + connection.stream_info_.downstream_connection_info_provider_->setRemoteAddress( Network::Utility::resolveUrl("tcp://174.2.2.222:50000")); - connection.stream_info_.downstream_address_provider_->setLocalAddress( + connection.stream_info_.downstream_connection_info_provider_->setLocalAddress( Network::Utility::resolveUrl("tcp://172.0.0.1:80")); Buffer::OwnedImpl util_buf; envoy::config::core::v3::ProxyProtocolConfig config; @@ -94,9 +94,9 @@ TEST(ProxyProtocolHeaderTest, GeneratesV2IPv6Header) { // Make sure the wrapper utility generates the same output. 
testing::NiceMock connection; - connection.stream_info_.downstream_address_provider_->setRemoteAddress( + connection.stream_info_.downstream_connection_info_provider_->setRemoteAddress( Network::Utility::resolveUrl("tcp://[1:2:3::4]:8")); - connection.stream_info_.downstream_address_provider_->setLocalAddress( + connection.stream_info_.downstream_connection_info_provider_->setLocalAddress( Network::Utility::resolveUrl("tcp://[1:100:200:3::]:2")); Buffer::OwnedImpl util_buf; envoy::config::core::v3::ProxyProtocolConfig config; diff --git a/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc b/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc index aea16f0433be4..cb9d97d5593f8 100644 --- a/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc +++ b/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc @@ -48,10 +48,10 @@ class ProxyProtocolRegressionTest : public testing::TestWithParamaddressProvider().localAddress())); + .WillOnce(ReturnRef(socket_->connectionInfoProvider().localAddress())); EXPECT_CALL(socket_factory_, getListenSocket(_)).WillOnce(Return(socket_)); connection_handler_->addListener(absl::nullopt, *this); - conn_ = dispatcher_->createClientConnection(socket_->addressProvider().localAddress(), + conn_ = dispatcher_->createClientConnection(socket_->connectionInfoProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), Network::Test::createRawBufferSocket(), nullptr); conn_->addConnectionCallbacks(connection_callbacks_); @@ -199,9 +199,9 @@ TEST_P(ProxyProtocolRegressionTest, V1Basic) { expectData("more data"); - EXPECT_EQ(server_connection_->addressProvider().remoteAddress()->ip()->addressAsString(), + EXPECT_EQ(server_connection_->connectionInfoProvider().remoteAddress()->ip()->addressAsString(), source_addr); - EXPECT_TRUE(server_connection_->addressProvider().localAddressRestored()); + 
EXPECT_TRUE(server_connection_->connectionInfoProvider().localAddressRestored()); disconnect(); } @@ -222,9 +222,9 @@ TEST_P(ProxyProtocolRegressionTest, V2Basic) { expectData("more data"); - EXPECT_EQ(server_connection_->addressProvider().remoteAddress()->ip()->addressAsString(), + EXPECT_EQ(server_connection_->connectionInfoProvider().remoteAddress()->ip()->addressAsString(), source_addr); - EXPECT_TRUE(server_connection_->addressProvider().localAddressRestored()); + EXPECT_TRUE(server_connection_->connectionInfoProvider().localAddressRestored()); disconnect(); } @@ -239,13 +239,13 @@ TEST_P(ProxyProtocolRegressionTest, V2LocalConnection) { expectData("more data"); if (GetParam() == Envoy::Network::Address::IpVersion::v4) { - EXPECT_EQ(server_connection_->addressProvider().remoteAddress()->ip()->addressAsString(), + EXPECT_EQ(server_connection_->connectionInfoProvider().remoteAddress()->ip()->addressAsString(), "127.0.0.1"); } else { - EXPECT_EQ(server_connection_->addressProvider().remoteAddress()->ip()->addressAsString(), + EXPECT_EQ(server_connection_->connectionInfoProvider().remoteAddress()->ip()->addressAsString(), "::1"); } - EXPECT_FALSE(server_connection_->addressProvider().localAddressRestored()); + EXPECT_FALSE(server_connection_->connectionInfoProvider().localAddressRestored()); disconnect(); } diff --git a/test/extensions/common/wasm/wasm_test.cc b/test/extensions/common/wasm/wasm_test.cc index 61f50bfed463a..2e947bc33e814 100644 --- a/test/extensions/common/wasm/wasm_test.cc +++ b/test/extensions/common/wasm/wasm_test.cc @@ -119,28 +119,27 @@ TEST_P(WasmCommonTest, WasmFailState) { wasm->wasm()->setFailStateForTesting(proxy_wasm::FailState::RuntimeError); EXPECT_EQ(toWasmEvent(wasm_base), WasmEvent::RuntimeError); - auto root_context = static_cast(wasm->wasm()->createRootContext(plugin)); - uint32_t grpc_call_token1 = root_context->nextGrpcCallToken(); - uint32_t grpc_call_token2 = root_context->nextGrpcCallToken(); + uint32_t grpc_call_token1 = 
wasm->wasm()->nextGrpcCallId(); + EXPECT_TRUE(wasm->wasm()->isGrpcCallId(grpc_call_token1)); + uint32_t grpc_call_token2 = wasm->wasm()->nextGrpcCallId(); + EXPECT_TRUE(wasm->wasm()->isGrpcCallId(grpc_call_token2)); EXPECT_NE(grpc_call_token1, grpc_call_token2); - root_context->setNextGrpcTokenForTesting(0); // Rollover. - EXPECT_EQ(root_context->nextGrpcCallToken(), 1); - uint32_t grpc_stream_token1 = root_context->nextGrpcStreamToken(); - uint32_t grpc_stream_token2 = root_context->nextGrpcStreamToken(); + uint32_t grpc_stream_token1 = wasm->wasm()->nextGrpcStreamId(); + EXPECT_TRUE(wasm->wasm()->isGrpcStreamId(grpc_stream_token1)); + uint32_t grpc_stream_token2 = wasm->wasm()->nextGrpcStreamId(); + EXPECT_TRUE(wasm->wasm()->isGrpcStreamId(grpc_stream_token2)); EXPECT_NE(grpc_stream_token1, grpc_stream_token2); - root_context->setNextGrpcTokenForTesting(0xFFFFFFFF); // Rollover. - EXPECT_EQ(root_context->nextGrpcStreamToken(), 2); - uint32_t http_call_token1 = root_context->nextHttpCallToken(); - uint32_t http_call_token2 = root_context->nextHttpCallToken(); + uint32_t http_call_token1 = wasm->wasm()->nextHttpCallId(); + EXPECT_TRUE(wasm->wasm()->isHttpCallId(http_call_token1)); + uint32_t http_call_token2 = wasm->wasm()->nextHttpCallId(); + EXPECT_TRUE(wasm->wasm()->isHttpCallId(http_call_token2)); EXPECT_NE(http_call_token1, http_call_token2); - root_context->setNextHttpCallTokenForTesting(0); // Rollover. 
- EXPECT_EQ(root_context->nextHttpCallToken(), 1); + auto root_context = static_cast(wasm->wasm()->createRootContext(plugin)); EXPECT_EQ(root_context->getBuffer(WasmBufferType::HttpCallResponseBody), nullptr); EXPECT_EQ(root_context->getBuffer(WasmBufferType::PluginConfiguration), nullptr); - delete root_context; Filters::Common::Expr::CelStatePrototype wasm_state_prototype( diff --git a/test/extensions/common/wasm/wasm_vm_test.cc b/test/extensions/common/wasm/wasm_vm_test.cc index f812bc88cbe24..dd4b7412af3cc 100644 --- a/test/extensions/common/wasm/wasm_vm_test.cc +++ b/test/extensions/common/wasm/wasm_vm_test.cc @@ -103,18 +103,18 @@ class MockHostFunctions { #if defined(PROXY_WASM_HAS_RUNTIME_V8) MockHostFunctions* g_host_functions; -void pong(void*, Word value) { g_host_functions->pong(convertWordToUint32(value)); } +void pong(Word value) { g_host_functions->pong(convertWordToUint32(value)); } -Word random(void*) { return {g_host_functions->random()}; } +Word random() { return {g_host_functions->random()}; } // pong() with wrong number of arguments. -void bad_pong1(void*) {} +void badPong1() {} // pong() with wrong return type. -Word bad_pong2(void*, Word) { return 2; } +Word badPong2(Word) { return 2; } // pong() with wrong argument type. 
-double bad_pong3(void*, double) { return 3; } +double badPong3(double) { return 3; } class WasmVmTest : public testing::TestWithParam { public: @@ -195,13 +195,13 @@ TEST_P(WasmVmTest, V8BadHostFunctions) { wasm_vm_->registerCallback("env", "random", &random, CONVERT_FUNCTION_WORD_TO_UINT32(random)); EXPECT_FALSE(wasm_vm_->link("test")); - wasm_vm_->registerCallback("env", "pong", &bad_pong1, CONVERT_FUNCTION_WORD_TO_UINT32(bad_pong1)); + wasm_vm_->registerCallback("env", "pong", &badPong1, CONVERT_FUNCTION_WORD_TO_UINT32(badPong1)); EXPECT_FALSE(wasm_vm_->link("test")); - wasm_vm_->registerCallback("env", "pong", &bad_pong2, CONVERT_FUNCTION_WORD_TO_UINT32(bad_pong2)); + wasm_vm_->registerCallback("env", "pong", &badPong2, CONVERT_FUNCTION_WORD_TO_UINT32(badPong2)); EXPECT_FALSE(wasm_vm_->link("test")); - wasm_vm_->registerCallback("env", "pong", &bad_pong3, CONVERT_FUNCTION_WORD_TO_UINT32(bad_pong3)); + wasm_vm_->registerCallback("env", "pong", &badPong3, CONVERT_FUNCTION_WORD_TO_UINT32(badPong3)); EXPECT_FALSE(wasm_vm_->link("test")); } diff --git a/test/extensions/filters/common/expr/BUILD b/test/extensions/filters/common/expr/BUILD index b1ebb4283ba7b..694eb4b241cdd 100644 --- a/test/extensions/filters/common/expr/BUILD +++ b/test/extensions/filters/common/expr/BUILD @@ -52,7 +52,7 @@ envoy_proto_library( envoy_cc_fuzz_test( name = "evaluator_fuzz_test", srcs = ["evaluator_fuzz_test.cc"], - corpus = ":evaluator_corpus", + corpus = "evaluator_corpus", deps = [ ":evaluator_fuzz_proto_cc_proto", "//source/extensions/filters/common/expr:evaluator_lib", diff --git a/test/extensions/filters/common/expr/context_test.cc b/test/extensions/filters/common/expr/context_test.cc index f7b1931195e46..2f7fe9d0f95ce 100644 --- a/test/extensions/filters/common/expr/context_test.cc +++ b/test/extensions/filters/common/expr/context_test.cc @@ -438,10 +438,10 @@ TEST(Context, ConnectionAttributes) { Network::Address::InstanceConstSharedPtr upstream_local_address = 
Network::Utility::parseInternetAddress("10.1.2.3", 1000, false); const std::string sni_name = "kittens.com"; - info.downstream_address_provider_->setLocalAddress(local); - info.downstream_address_provider_->setRemoteAddress(remote); - info.downstream_address_provider_->setRequestedServerName(sni_name); - EXPECT_CALL(info, downstreamSslConnection()).WillRepeatedly(Return(downstream_ssl_info)); + info.downstream_connection_info_provider_->setLocalAddress(local); + info.downstream_connection_info_provider_->setRemoteAddress(remote); + info.downstream_connection_info_provider_->setRequestedServerName(sni_name); + info.downstream_connection_info_provider_->setSslConnection(downstream_ssl_info); EXPECT_CALL(info, upstreamSslConnection()).WillRepeatedly(Return(upstream_ssl_info)); EXPECT_CALL(info, upstreamHost()).WillRepeatedly(Return(upstream_host)); EXPECT_CALL(info, upstreamLocalAddress()).WillRepeatedly(ReturnRef(upstream_local_address)); @@ -449,7 +449,7 @@ TEST(Context, ConnectionAttributes) { EXPECT_CALL(info, upstreamTransportFailureReason()) .WillRepeatedly(ReturnRef(upstream_transport_failure_reason)); EXPECT_CALL(info, connectionID()).WillRepeatedly(Return(123)); - info.downstream_address_provider_->setConnectionID(123); + info.downstream_connection_info_provider_->setConnectionID(123); const absl::optional connection_termination_details = "unauthorized"; EXPECT_CALL(info, connectionTerminationDetails()) .WillRepeatedly(ReturnRef(connection_termination_details)); diff --git a/test/extensions/filters/common/ext_authz/check_request_utils_test.cc b/test/extensions/filters/common/ext_authz/check_request_utils_test.cc index 23ac64531a73a..19db3ecee0c6c 100644 --- a/test/extensions/filters/common/ext_authz/check_request_utils_test.cc +++ b/test/extensions/filters/common/ext_authz/check_request_utils_test.cc @@ -35,8 +35,8 @@ class CheckRequestUtilsTest : public testing::Test { void expectBasicHttp() { EXPECT_CALL(callbacks_, 
connection()).Times(2).WillRepeatedly(Return(&connection_)); - connection_.stream_info_.downstream_address_provider_->setRemoteAddress(addr_); - connection_.stream_info_.downstream_address_provider_->setLocalAddress(addr_); + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress(addr_); + connection_.stream_info_.downstream_connection_info_provider_->setLocalAddress(addr_); EXPECT_CALL(Const(connection_), ssl()).Times(2).WillRepeatedly(Return(ssl_)); EXPECT_CALL(callbacks_, streamId()).WillOnce(Return(0)); EXPECT_CALL(callbacks_, decodingBuffer()).WillOnce(Return(buffer_.get())); @@ -113,8 +113,8 @@ class CheckRequestUtilsTest : public testing::Test { TEST_F(CheckRequestUtilsTest, BasicTcp) { envoy::service::auth::v3::CheckRequest request; EXPECT_CALL(net_callbacks_, connection()).Times(2).WillRepeatedly(ReturnRef(connection_)); - connection_.stream_info_.downstream_address_provider_->setRemoteAddress(addr_); - connection_.stream_info_.downstream_address_provider_->setLocalAddress(addr_); + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress(addr_); + connection_.stream_info_.downstream_connection_info_provider_->setLocalAddress(addr_); EXPECT_CALL(Const(connection_), ssl()).Times(2).WillRepeatedly(Return(ssl_)); EXPECT_CALL(*ssl_, uriSanPeerCertificate()).WillOnce(Return(std::vector{"source"})); EXPECT_CALL(*ssl_, uriSanLocalCertificate()) @@ -134,8 +134,8 @@ TEST_F(CheckRequestUtilsTest, BasicTcp) { TEST_F(CheckRequestUtilsTest, TcpPeerCertificate) { envoy::service::auth::v3::CheckRequest request; EXPECT_CALL(net_callbacks_, connection()).Times(2).WillRepeatedly(ReturnRef(connection_)); - connection_.stream_info_.downstream_address_provider_->setRemoteAddress(addr_); - connection_.stream_info_.downstream_address_provider_->setLocalAddress(addr_); + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress(addr_); + 
connection_.stream_info_.downstream_connection_info_provider_->setLocalAddress(addr_); EXPECT_CALL(Const(connection_), ssl()).Times(2).WillRepeatedly(Return(ssl_)); EXPECT_CALL(*ssl_, uriSanPeerCertificate()).WillOnce(Return(std::vector{"source"})); EXPECT_CALL(*ssl_, uriSanLocalCertificate()) @@ -176,6 +176,34 @@ TEST_F(CheckRequestUtilsTest, BasicHttp) { EXPECT_TRUE(request_.attributes().request().has_time()); } +// Verify that check request merges the duplicate headers. +TEST_F(CheckRequestUtilsTest, BasicHttpWithDuplicateHeaders) { + const uint64_t size = 0; + envoy::service::auth::v3::CheckRequest request_; + + // A client supplied duplicate header should be merged. + Http::TestRequestHeaderMapImpl request_headers{ + {"x-duplicate-header", ""}, {"x-duplicate-header", "foo"}, {"x-duplicate-header", "bar"}, + {"x-normal-header", "foo"}, {"x-empty-header", ""}, + }; + + EXPECT_CALL(*ssl_, uriSanPeerCertificate()).WillOnce(Return(std::vector{"source"})); + EXPECT_CALL(*ssl_, uriSanLocalCertificate()) + .WillOnce(Return(std::vector{"destination"})); + expectBasicHttp(); + CheckRequestUtils::createHttpCheck(&callbacks_, request_headers, + Protobuf::Map(), + envoy::config::core::v3::Metadata(), request_, size, + /*pack_as_bytes=*/false, /*include_peer_certificate=*/false, + Protobuf::Map()); + ASSERT_EQ(size, request_.attributes().request().http().body().size()); + EXPECT_EQ(buffer_->toString().substr(0, size), request_.attributes().request().http().body()); + EXPECT_EQ(",foo,bar", request_.attributes().request().http().headers().at("x-duplicate-header")); + EXPECT_EQ("foo", request_.attributes().request().http().headers().at("x-normal-header")); + EXPECT_EQ("", request_.attributes().request().http().headers().at("x-empty-header")); + EXPECT_TRUE(request_.attributes().request().has_time()); +} + // Verify that check request object has only a portion of the request data. 
TEST_F(CheckRequestUtilsTest, BasicHttpWithPartialBody) { const uint64_t size = 4049; @@ -267,8 +295,8 @@ TEST_F(CheckRequestUtilsTest, CheckAttrContextPeer) { {":path", "/bar"}}; envoy::service::auth::v3::CheckRequest request; EXPECT_CALL(callbacks_, connection()).WillRepeatedly(Return(&connection_)); - connection_.stream_info_.downstream_address_provider_->setRemoteAddress(addr_); - connection_.stream_info_.downstream_address_provider_->setLocalAddress(addr_); + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress(addr_); + connection_.stream_info_.downstream_connection_info_provider_->setLocalAddress(addr_); EXPECT_CALL(Const(connection_), ssl()).WillRepeatedly(Return(ssl_)); EXPECT_CALL(callbacks_, streamId()).WillRepeatedly(Return(0)); EXPECT_CALL(callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); diff --git a/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc b/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc index f771573698d1e..8d776a73b061f 100644 --- a/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc +++ b/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc @@ -31,14 +31,12 @@ namespace ExtAuthz { using Params = std::tuple; -class ExtAuthzGrpcClientTest : public testing::TestWithParam { +class ExtAuthzGrpcClientTest : public testing::Test { public: ExtAuthzGrpcClientTest() : async_client_(new Grpc::MockAsyncClient()), timeout_(10) {} - void initialize(const Params& param) { - api_version_ = std::get<0>(param); - client_ = std::make_unique(Grpc::RawAsyncClientPtr{async_client_}, timeout_, - api_version_); + void initialize() { + client_ = std::make_unique(Grpc::RawAsyncClientPtr{async_client_}, timeout_); } void expectCallSend(envoy::service::auth::v3::CheckRequest& request) { @@ -48,9 +46,7 @@ class ExtAuthzGrpcClientTest : public testing::TestWithParam { Invoke([this](absl::string_view service_full_name, absl::string_view method_name, 
Buffer::InstancePtr&&, Grpc::RawAsyncRequestCallbacks&, Tracing::Span&, const Http::AsyncClient::RequestOptions& options) -> Grpc::AsyncRequest* { - EXPECT_EQ(TestUtility::getVersionedServiceFullName( - "envoy.service.auth.{}.Authorization", api_version_), - service_full_name); + EXPECT_EQ("envoy.service.auth.v3.Authorization", service_full_name); EXPECT_EQ("Check", method_name); EXPECT_EQ(timeout_->count(), options.timeout->count()); return &async_request_; @@ -67,14 +63,9 @@ class ExtAuthzGrpcClientTest : public testing::TestWithParam { envoy::config::core::v3::ApiVersion api_version_; }; -INSTANTIATE_TEST_SUITE_P(Parameterized, ExtAuthzGrpcClientTest, - Values(Params(envoy::config::core::v3::ApiVersion::AUTO), - Params(envoy::config::core::v3::ApiVersion::V2), - Params(envoy::config::core::v3::ApiVersion::V3))); - // Test the client when an ok response is received. -TEST_P(ExtAuthzGrpcClientTest, AuthorizationOk) { - initialize(GetParam()); +TEST_F(ExtAuthzGrpcClientTest, AuthorizationOk) { + initialize(); auto check_response = std::make_unique(); auto status = check_response->mutable_status(); @@ -111,8 +102,8 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationOk) { } // Test the client when an ok response is received. -TEST_P(ExtAuthzGrpcClientTest, AuthorizationOkWithAllAtributes) { - initialize(GetParam()); +TEST_F(ExtAuthzGrpcClientTest, AuthorizationOkWithAllAtributes) { + initialize(); const std::string empty_body{}; const auto expected_headers = TestCommon::makeHeaderValueOption({{"foo", "bar", false}}); @@ -138,8 +129,8 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationOkWithAllAtributes) { } // Test the client when a denied response is received. 
-TEST_P(ExtAuthzGrpcClientTest, AuthorizationDenied) { - initialize(GetParam()); +TEST_F(ExtAuthzGrpcClientTest, AuthorizationDenied) { + initialize(); auto check_response = std::make_unique(); auto status = check_response->mutable_status(); @@ -162,8 +153,8 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationDenied) { } // Test the client when a gRPC status code unknown is received from the authorization server. -TEST_P(ExtAuthzGrpcClientTest, AuthorizationDeniedGrpcUnknownStatus) { - initialize(GetParam()); +TEST_F(ExtAuthzGrpcClientTest, AuthorizationDeniedGrpcUnknownStatus) { + initialize(); auto check_response = std::make_unique(); auto status = check_response->mutable_status(); @@ -186,8 +177,8 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationDeniedGrpcUnknownStatus) { } // Test the client when a denied response with additional HTTP attributes is received. -TEST_P(ExtAuthzGrpcClientTest, AuthorizationDeniedWithAllAttributes) { - initialize(GetParam()); +TEST_F(ExtAuthzGrpcClientTest, AuthorizationDeniedWithAllAttributes) { + initialize(); const std::string expected_body{"test"}; const auto expected_headers = @@ -215,8 +206,8 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationDeniedWithAllAttributes) { } // Test the client when an unknown error occurs. -TEST_P(ExtAuthzGrpcClientTest, UnknownError) { - initialize(GetParam()); +TEST_F(ExtAuthzGrpcClientTest, UnknownError) { + initialize(); envoy::service::auth::v3::CheckRequest request; expectCallSend(request); @@ -228,8 +219,8 @@ TEST_P(ExtAuthzGrpcClientTest, UnknownError) { } // Test the client when the request is canceled. 
-TEST_P(ExtAuthzGrpcClientTest, CancelledAuthorizationRequest) { - initialize(GetParam()); +TEST_F(ExtAuthzGrpcClientTest, CancelledAuthorizationRequest) { + initialize(); envoy::service::auth::v3::CheckRequest request; EXPECT_CALL(*async_client_, sendRaw(_, _, _, _, _, _)).WillOnce(Return(&async_request_)); @@ -240,8 +231,8 @@ TEST_P(ExtAuthzGrpcClientTest, CancelledAuthorizationRequest) { } // Test the client when the request times out. -TEST_P(ExtAuthzGrpcClientTest, AuthorizationRequestTimeout) { - initialize(GetParam()); +TEST_F(ExtAuthzGrpcClientTest, AuthorizationRequestTimeout) { + initialize(); envoy::service::auth::v3::CheckRequest request; expectCallSend(request); @@ -253,8 +244,8 @@ TEST_P(ExtAuthzGrpcClientTest, AuthorizationRequestTimeout) { } // Test the client when an OK response is received with dynamic metadata in that OK response. -TEST_P(ExtAuthzGrpcClientTest, AuthorizationOkWithDynamicMetadata) { - initialize(GetParam()); +TEST_F(ExtAuthzGrpcClientTest, AuthorizationOkWithDynamicMetadata) { + initialize(); auto check_response = std::make_unique(); auto status = check_response->mutable_status(); diff --git a/test/extensions/filters/common/original_src/original_src_socket_option_test.cc b/test/extensions/filters/common/original_src/original_src_socket_option_test.cc index 7159e32273659..c123e0253f680 100644 --- a/test/extensions/filters/common/original_src/original_src_socket_option_test.cc +++ b/test/extensions/filters/common/original_src/original_src_socket_option_test.cc @@ -36,14 +36,14 @@ TEST_F(OriginalSrcSocketOptionTest, TestSetOptionPreBindSetsAddress) { const auto address = Network::Utility::parseInternetAddress("127.0.0.2"); auto option = makeOptionByAddress(address); EXPECT_EQ(option->setOption(socket_, envoy::config::core::v3::SocketOption::STATE_PREBIND), true); - EXPECT_EQ(*socket_.address_provider_->localAddress(), *address); + EXPECT_EQ(*socket_.connection_info_provider_->localAddress(), *address); } 
TEST_F(OriginalSrcSocketOptionTest, TestSetOptionPreBindSetsAddressSecond) { const auto address = Network::Utility::parseInternetAddress("1.2.3.4"); auto option = makeOptionByAddress(address); EXPECT_EQ(option->setOption(socket_, envoy::config::core::v3::SocketOption::STATE_PREBIND), true); - EXPECT_EQ(*socket_.address_provider_->localAddress(), *address); + EXPECT_EQ(*socket_.connection_info_provider_->localAddress(), *address); } TEST_F(OriginalSrcSocketOptionTest, TestSetOptionNotPrebindDoesNotSetAddress) { @@ -51,7 +51,7 @@ TEST_F(OriginalSrcSocketOptionTest, TestSetOptionNotPrebindDoesNotSetAddress) { auto option = makeOptionByAddress(address); EXPECT_EQ(option->setOption(socket_, envoy::config::core::v3::SocketOption::STATE_LISTENING), true); - EXPECT_NE(*socket_.address_provider_->localAddress(), *address); + EXPECT_NE(*socket_.connection_info_provider_->localAddress(), *address); } TEST_F(OriginalSrcSocketOptionTest, TestIpv4HashKey) { diff --git a/test/extensions/filters/common/ratelimit/ratelimit_impl_test.cc b/test/extensions/filters/common/ratelimit/ratelimit_impl_test.cc index 00c87f500904a..20c63eba80f1a 100644 --- a/test/extensions/filters/common/ratelimit/ratelimit_impl_test.cc +++ b/test/extensions/filters/common/ratelimit/ratelimit_impl_test.cc @@ -54,8 +54,8 @@ class RateLimitGrpcClientTest : public testing::Test { public: RateLimitGrpcClientTest() : async_client_(new Grpc::MockAsyncClient()), - client_(Grpc::RawAsyncClientPtr{async_client_}, absl::optional(), - envoy::config::core::v3::ApiVersion::AUTO) {} + client_(Grpc::RawAsyncClientPtr{async_client_}, + absl::optional()) {} Grpc::MockAsyncClient* async_client_; Grpc::MockAsyncRequest async_request_; @@ -77,7 +77,7 @@ TEST_F(RateLimitGrpcClientTest, Basic) { Invoke([this](absl::string_view service_full_name, absl::string_view method_name, Buffer::InstancePtr&&, Grpc::RawAsyncRequestCallbacks&, Tracing::Span&, const Http::AsyncClient::RequestOptions&) -> Grpc::AsyncRequest* { - std::string 
service_name = "envoy.service.ratelimit.v2.RateLimitService"; + std::string service_name = "envoy.service.ratelimit.v3.RateLimitService"; EXPECT_EQ(service_name, service_full_name); EXPECT_EQ("ShouldRateLimit", method_name); return &async_request_; diff --git a/test/extensions/filters/common/rbac/engine_impl_test.cc b/test/extensions/filters/common/rbac/engine_impl_test.cc index 87e10cfb23d62..8e56a2023b4f0 100644 --- a/test/extensions/filters/common/rbac/engine_impl_test.cc +++ b/test/extensions/filters/common/rbac/engine_impl_test.cc @@ -176,11 +176,11 @@ TEST(RoleBasedAccessControlEngineImpl, AllowedAllowlist) { NiceMock info; Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 123, false); - info.downstream_address_provider_->setLocalAddress(addr); + info.downstream_connection_info_provider_->setLocalAddress(addr); checkEngine(engine, true, LogResult::Undecided, info, conn, headers); addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 456, false); - info.downstream_address_provider_->setLocalAddress(addr); + info.downstream_connection_info_provider_->setLocalAddress(addr); checkEngine(engine, false, LogResult::Undecided, info, conn, headers); } @@ -199,11 +199,11 @@ TEST(RoleBasedAccessControlEngineImpl, DeniedDenylist) { NiceMock info; Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 123, false); - info.downstream_address_provider_->setLocalAddress(addr); + info.downstream_connection_info_provider_->setLocalAddress(addr); checkEngine(engine, false, LogResult::Undecided, info, conn, headers); addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 456, false); - info.downstream_address_provider_->setLocalAddress(addr); + info.downstream_connection_info_provider_->setLocalAddress(addr); checkEngine(engine, true, LogResult::Undecided, info, conn, headers); } @@ -412,7 +412,7 @@ TEST(RoleBasedAccessControlEngineImpl, 
ConjunctiveCondition) { NiceMock info; Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 123, false); - info.downstream_address_provider_->setLocalAddress(addr); + info.downstream_connection_info_provider_->setLocalAddress(addr); checkEngine(engine, false, LogResult::Undecided, info, conn, headers); } @@ -444,11 +444,11 @@ TEST(RoleBasedAccessControlEngineImpl, LogIfMatched) { Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 123, false); - info.downstream_address_provider_->setLocalAddress(addr); + info.downstream_connection_info_provider_->setLocalAddress(addr); checkEngine(engine, true, RBAC::LogResult::Yes, info, conn, headers); addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 456, false); - info.downstream_address_provider_->setLocalAddress(addr); + info.downstream_connection_info_provider_->setLocalAddress(addr); checkEngine(engine, true, RBAC::LogResult::No, info, conn, headers); } diff --git a/test/extensions/filters/common/rbac/matchers_test.cc b/test/extensions/filters/common/rbac/matchers_test.cc index 87d22517dd428..09f9f75d08863 100644 --- a/test/extensions/filters/common/rbac/matchers_test.cc +++ b/test/extensions/filters/common/rbac/matchers_test.cc @@ -4,6 +4,7 @@ #include "envoy/config/route/v3/route_components.pb.h" #include "envoy/type/matcher/v3/metadata.pb.h" +#include "source/common/network/address_impl.h" #include "source/common/network/utility.h" #include "source/extensions/filters/common/expr/evaluator.h" #include "source/extensions/filters/common/rbac/matchers.h" @@ -33,6 +34,10 @@ void checkMatcher( EXPECT_EQ(expected, matcher.matches(connection, headers, info)); } +PortRangeMatcher createPortRangeMatcher(envoy::type::v3::Int32Range range) { + return PortRangeMatcher(range); +} + TEST(AlwaysMatcher, AlwaysMatches) { checkMatcher(RBAC::AlwaysMatcher(), true); } TEST(AndMatcher, Permission_Set) { @@ 
-50,12 +55,12 @@ TEST(AndMatcher, Permission_Set) { NiceMock info; Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 123, false); - info.downstream_address_provider_->setLocalAddress(addr); + info.downstream_connection_info_provider_->setLocalAddress(addr); checkMatcher(RBAC::AndMatcher(set), true, conn, headers, info); addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 8080, false); - info.downstream_address_provider_->setLocalAddress(addr); + info.downstream_connection_info_provider_->setLocalAddress(addr); checkMatcher(RBAC::AndMatcher(set), false, conn, headers, info); } @@ -77,12 +82,12 @@ TEST(AndMatcher, Principal_Set) { NiceMock info; Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 123, false); - info.downstream_address_provider_->setDirectRemoteAddressForTest(addr); + info.downstream_connection_info_provider_->setDirectRemoteAddressForTest(addr); checkMatcher(RBAC::AndMatcher(set), true, conn, headers, info); addr = Envoy::Network::Utility::parseInternetAddress("1.2.4.6", 123, false); - info.downstream_address_provider_->setDirectRemoteAddressForTest(addr); + info.downstream_connection_info_provider_->setDirectRemoteAddressForTest(addr); checkMatcher(RBAC::AndMatcher(set), false, conn, headers, info); } @@ -97,7 +102,13 @@ TEST(OrMatcher, Permission_Set) { NiceMock info; Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 456, false); - info.downstream_address_provider_->setLocalAddress(addr); + info.downstream_connection_info_provider_->setLocalAddress(addr); + + checkMatcher(RBAC::OrMatcher(set), false, conn, headers, info); + + perm = set.add_rules(); + perm->mutable_destination_port_range()->set_start(123); + perm->mutable_destination_port_range()->set_end(456); checkMatcher(RBAC::OrMatcher(set), false, conn, headers, info); @@ -119,7 +130,7 @@ 
TEST(OrMatcher, Principal_Set) { NiceMock info; Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.4.6", 456, false); - info.downstream_address_provider_->setDirectRemoteAddressForTest(addr); + info.downstream_connection_info_provider_->setDirectRemoteAddressForTest(addr); checkMatcher(RBAC::OrMatcher(set), false, conn, headers, info); @@ -176,10 +187,10 @@ TEST(IPMatcher, IPMatcher) { Envoy::Network::Utility::parseInternetAddress("4.5.6.7", 456, false); Envoy::Network::Address::InstanceConstSharedPtr downstream_remote = Envoy::Network::Utility::parseInternetAddress("8.9.10.11", 456, false); - conn.stream_info_.downstream_address_provider_->setRemoteAddress(connection_remote); - info.downstream_address_provider_->setLocalAddress(direct_local); - info.downstream_address_provider_->setDirectRemoteAddressForTest(direct_remote); - info.downstream_address_provider_->setRemoteAddress(downstream_remote); + conn.stream_info_.downstream_connection_info_provider_->setRemoteAddress(connection_remote); + info.downstream_connection_info_provider_->setLocalAddress(direct_local); + info.downstream_connection_info_provider_->setDirectRemoteAddressForTest(direct_remote); + info.downstream_connection_info_provider_->setRemoteAddress(downstream_remote); envoy::config::core::v3::CidrRange connection_remote_cidr; connection_remote_cidr.set_address_prefix("12.13.14.15"); @@ -227,12 +238,64 @@ TEST(PortMatcher, PortMatcher) { NiceMock info; Envoy::Network::Address::InstanceConstSharedPtr addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 123, false); - info.downstream_address_provider_->setLocalAddress(addr); + info.downstream_connection_info_provider_->setLocalAddress(addr); checkMatcher(PortMatcher(123), true, conn, headers, info); checkMatcher(PortMatcher(456), false, conn, headers, info); } +// Test valid and invalid destination_port_range permission rule in RBAC. 
+TEST(PortRangeMatcher, PortRangeMatcher) { + Envoy::Network::MockConnection conn; + Envoy::Http::TestRequestHeaderMapImpl headers; + NiceMock info; + Envoy::Network::Address::InstanceConstSharedPtr addr = + Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 456, false); + info.downstream_connection_info_provider_->setLocalAddress(addr); + + // IP address with port 456 is in range [123, 789) and [456, 789), but not in range [123, 456) or + // [12, 34). + envoy::type::v3::Int32Range range; + range.set_start(123); + range.set_end(789); + checkMatcher(PortRangeMatcher(range), true, conn, headers, info); + + range.set_start(456); + range.set_end(789); + checkMatcher(PortRangeMatcher(range), true, conn, headers, info); + + range.set_start(123); + range.set_end(456); + checkMatcher(PortRangeMatcher(range), false, conn, headers, info); + + range.set_start(12); + range.set_end(34); + checkMatcher(PortRangeMatcher(range), false, conn, headers, info); + + // Only IP address is valid for the permission rule. + NiceMock info2; + Envoy::Network::Address::InstanceConstSharedPtr addr2 = + std::make_shared("test"); + info2.downstream_connection_info_provider_->setLocalAddress(addr2); + checkMatcher(PortRangeMatcher(range), false, conn, headers, info2); + + // Invalid rule will cause an exception. 
+ range.set_start(-1); + range.set_end(80); + EXPECT_THROW_WITH_REGEX(createPortRangeMatcher(range), EnvoyException, + "range start .* is out of bounds"); + + range.set_start(80); + range.set_end(65537); + EXPECT_THROW_WITH_REGEX(createPortRangeMatcher(range), EnvoyException, + "range end .* is out of bounds"); + + range.set_start(80); + range.set_end(80); + EXPECT_THROW_WITH_REGEX(createPortRangeMatcher(range), EnvoyException, + "range start .* cannot be greater or equal than range end .*"); +} + TEST(AuthenticatedMatcher, uriSanPeerCertificate) { Envoy::Network::MockConnection conn; auto ssl = std::make_shared(); @@ -373,17 +436,17 @@ TEST(PolicyMatcher, PolicyMatcher) { EXPECT_CALL(*ssl, subjectPeerCertificate()).WillRepeatedly(ReturnRef(subject)); EXPECT_CALL(Const(conn), ssl()).Times(2).WillRepeatedly(Return(ssl)); - info.downstream_address_provider_->setLocalAddress(addr); + info.downstream_connection_info_provider_->setLocalAddress(addr); checkMatcher(matcher, true, conn, headers, info); EXPECT_CALL(Const(conn), ssl()).Times(2).WillRepeatedly(Return(nullptr)); - info.downstream_address_provider_->setLocalAddress(addr); + info.downstream_connection_info_provider_->setLocalAddress(addr); checkMatcher(matcher, false, conn, headers, info); addr = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", 789, false); - info.downstream_address_provider_->setLocalAddress(addr); + info.downstream_connection_info_provider_->setLocalAddress(addr); checkMatcher(matcher, false, conn, headers, info); } diff --git a/test/extensions/filters/http/alternate_protocols_cache/filter_integration_test.cc b/test/extensions/filters/http/alternate_protocols_cache/filter_integration_test.cc index 344da0f9bf0b5..74660d6a2c6b2 100644 --- a/test/extensions/filters/http/alternate_protocols_cache/filter_integration_test.cc +++ b/test/extensions/filters/http/alternate_protocols_cache/filter_integration_test.cc @@ -60,7 +60,7 @@ TEST_P(FilterIntegrationTest, AltSvc) { {":method", "POST"}, 
{":path", "/test/long/url"}, {":scheme", "http"}, {":authority", "host"}, {"x-lyft-user-id", "123"}, {"x-forwarded-for", "10.0.0.1"}}; int port = fake_upstreams_[0]->localAddress()->ip()->port(); - std::string alt_svc = absl::StrCat("h3-29=\":", port, "\"; ma=86400"); + std::string alt_svc = absl::StrCat("h3=\":", port, "\"; ma=86400"); Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}, {"alt-svc", alt_svc}}; // First request should go out over HTTP/2. The response includes an Alt-Svc header. diff --git a/test/extensions/filters/http/aws_lambda/aws_lambda_filter_test.cc b/test/extensions/filters/http/aws_lambda/aws_lambda_filter_test.cc index ad4c1dbd93b28..6f8155d824560 100644 --- a/test/extensions/filters/http/aws_lambda/aws_lambda_filter_test.cc +++ b/test/extensions/filters/http/aws_lambda/aws_lambda_filter_test.cc @@ -100,7 +100,7 @@ TEST_F(AwsLambdaFilterTest, PerRouteConfigWrongClusterMetadata) { setupFilter({arn_, InvocationMode::Synchronous, true /*passthrough*/}); FilterSettings route_settings{arn_, InvocationMode::Synchronous, true /*passthrough*/}; - ON_CALL(decoder_callbacks_.route_->route_entry_, perFilterConfig("envoy.filters.http.aws_lambda")) + ON_CALL(*decoder_callbacks_.route_, mostSpecificPerFilterConfig("envoy.filters.http.aws_lambda")) .WillByDefault(Return(&route_settings)); ON_CALL(*decoder_callbacks_.cluster_info_, metadata()).WillByDefault(ReturnRef(metadata)); @@ -122,7 +122,7 @@ TEST_F(AwsLambdaFilterTest, PerRouteConfigWrongClusterMetadata) { TEST_F(AwsLambdaFilterTest, PerRouteConfigCorrectClusterMetadata) { setupFilter({arn_, InvocationMode::Synchronous, true /*passthrough*/}); FilterSettings route_settings{arn_, InvocationMode::Synchronous, true /*passthrough*/}; - ON_CALL(decoder_callbacks_.route_->route_entry_, perFilterConfig("envoy.filters.http.aws_lambda")) + ON_CALL(*decoder_callbacks_.route_, mostSpecificPerFilterConfig("envoy.filters.http.aws_lambda")) .WillByDefault(Return(&route_settings)); 
Http::TestRequestHeaderMapImpl headers; diff --git a/test/extensions/filters/http/buffer/buffer_filter_test.cc b/test/extensions/filters/http/buffer/buffer_filter_test.cc index 1f58977e864dc..fd672c6242998 100644 --- a/test/extensions/filters/http/buffer/buffer_filter_test.cc +++ b/test/extensions/filters/http/buffer/buffer_filter_test.cc @@ -37,15 +37,6 @@ class BufferFilterTest : public testing::Test { filter_.setDecoderFilterCallbacks(callbacks_); } - void routeLocalConfig(const Router::RouteSpecificFilterConfig* route_settings, - const Router::RouteSpecificFilterConfig* vhost_settings) { - ON_CALL(callbacks_.route_->route_entry_, perFilterConfig("envoy.filters.http.buffer")) - .WillByDefault(Return(route_settings)); - ON_CALL(callbacks_.route_->route_entry_.virtual_host_, - perFilterConfig("envoy.filters.http.buffer")) - .WillByDefault(Return(vhost_settings)); - } - NiceMock callbacks_; BufferFilterConfigSharedPtr config_; BufferFilter filter_; @@ -133,16 +124,14 @@ TEST_F(BufferFilterTest, ContentLengthPopulationAlreadyPresent) { EXPECT_EQ(headers.getContentLengthValue(), "3"); } -TEST_F(BufferFilterTest, RouteConfigOverride) { - envoy::extensions::filters::http::buffer::v3::BufferPerRoute route_cfg; - auto* buf = route_cfg.mutable_buffer(); +TEST_F(BufferFilterTest, PerFilterConfigOverride) { + envoy::extensions::filters::http::buffer::v3::BufferPerRoute per_route_cfg; + auto* buf = per_route_cfg.mutable_buffer(); buf->mutable_max_request_bytes()->set_value(123); - envoy::extensions::filters::http::buffer::v3::BufferPerRoute vhost_cfg; - vhost_cfg.set_disabled(true); - BufferFilterSettings route_settings(route_cfg); - BufferFilterSettings vhost_settings(vhost_cfg); - routeLocalConfig(&route_settings, &vhost_settings); + BufferFilterSettings route_settings(per_route_cfg); + EXPECT_CALL(*callbacks_.route_, mostSpecificPerFilterConfig("envoy.filters.http.buffer")) + .WillOnce(Return(&route_settings)); EXPECT_CALL(callbacks_, setDecoderBufferLimit(123ULL)); 
Http::TestRequestHeaderMapImpl headers; @@ -151,26 +140,13 @@ TEST_F(BufferFilterTest, RouteConfigOverride) { filter_.onDestroy(); } -TEST_F(BufferFilterTest, VHostConfigOverride) { - envoy::extensions::filters::http::buffer::v3::BufferPerRoute vhost_cfg; - auto* buf = vhost_cfg.mutable_buffer(); - buf->mutable_max_request_bytes()->set_value(789); - BufferFilterSettings vhost_settings(vhost_cfg); - routeLocalConfig(nullptr, &vhost_settings); - - EXPECT_CALL(callbacks_, setDecoderBufferLimit(789ULL)); - - Http::TestRequestHeaderMapImpl headers; - EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_.decodeHeaders(headers, false)); - filter_.onDestroy(); -} - -TEST_F(BufferFilterTest, RouteDisabledConfigOverride) { - envoy::extensions::filters::http::buffer::v3::BufferPerRoute vhost_cfg; - vhost_cfg.set_disabled(true); - BufferFilterSettings vhost_settings(vhost_cfg); - routeLocalConfig(nullptr, &vhost_settings); +TEST_F(BufferFilterTest, PerFilterConfigDisabledConfigOverride) { + envoy::extensions::filters::http::buffer::v3::BufferPerRoute per_route_cfg; + per_route_cfg.set_disabled(true); + BufferFilterSettings route_settings(per_route_cfg); + EXPECT_CALL(*callbacks_.route_, mostSpecificPerFilterConfig("envoy.filters.http.buffer")) + .WillOnce(Return(&route_settings)); Http::TestRequestHeaderMapImpl headers; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(headers, false)); Buffer::OwnedImpl data1("hello"); diff --git a/test/extensions/filters/http/cache/cache_filter_integration_test.cc b/test/extensions/filters/http/cache/cache_filter_integration_test.cc index 7421ec6a44f0b..d275df8cc48f3 100644 --- a/test/extensions/filters/http/cache/cache_filter_integration_test.cc +++ b/test/extensions/filters/http/cache/cache_filter_integration_test.cc @@ -182,6 +182,18 @@ TEST_P(CacheIntegrationTest, ExpiredValidated) { // a freshly served response from the origin, unless the 304 response has an Age header, which // means it was served by an 
upstream cache. EXPECT_EQ(response_decoder->headers().get(Http::CustomHeaders::get().Age).size(), 0); + } + + // Advance time to get a fresh cached response + simTime().advanceTimeWait(Seconds(1)); + + // Send third request. The cached response was validated, thus it should have an Age header like + // fresh responses + { + IntegrationStreamDecoderPtr response_decoder = + sendHeaderOnlyRequestAwaitResponse(request_headers, serveFromCache()); + EXPECT_THAT(response_decoder->headers(), + HeaderHasValueRef(Http::CustomHeaders::get().Age, "1")); // Advance time to force a log flush. simTime().advanceTimeWait(Seconds(1)); diff --git a/test/extensions/filters/http/cache/cache_headers_utils_test.cc b/test/extensions/filters/http/cache/cache_headers_utils_test.cc index 2c0795195efee..fe1fa7098bf60 100644 --- a/test/extensions/filters/http/cache/cache_headers_utils_test.cc +++ b/test/extensions/filters/http/cache/cache_headers_utils_test.cc @@ -22,6 +22,17 @@ namespace HttpFilters { namespace Cache { namespace { +Protobuf::RepeatedPtrField<::envoy::type::matcher::v3::StringMatcher> +toStringMatchers(std::initializer_list allow_list) { + Protobuf::RepeatedPtrField<::envoy::type::matcher::v3::StringMatcher> proto_allow_list; + for (const auto& rule : allow_list) { + ::envoy::type::matcher::v3::StringMatcher* matcher = proto_allow_list.Add(); + matcher->set_exact(std::string(rule)); + } + + return proto_allow_list; +} + struct TestRequestCacheControl : public RequestCacheControl { TestRequestCacheControl(bool must_validate, bool no_store, bool no_transform, bool only_if_cached, OptionalDuration max_age, OptionalDuration min_fresh, @@ -453,7 +464,7 @@ TEST(GetAllMatchingHeaderNames, EmptyRuleset) { CacheHeadersUtils::getAllMatchingHeaderNames(headers, ruleset, result); - ASSERT_TRUE(result.empty()); + EXPECT_TRUE(result.empty()); } TEST(GetAllMatchingHeaderNames, EmptyHeaderMap) { @@ -463,11 +474,13 @@ TEST(GetAllMatchingHeaderNames, EmptyHeaderMap) { 
envoy::type::matcher::v3::StringMatcher matcher; matcher.set_exact("accept"); - ruleset.emplace_back(std::make_unique(matcher)); + ruleset.emplace_back( + std::make_unique>( + matcher)); CacheHeadersUtils::getAllMatchingHeaderNames(headers, ruleset, result); - ASSERT_TRUE(result.empty()); + EXPECT_TRUE(result.empty()); } TEST(GetAllMatchingHeaderNames, SingleMatchSingleValue) { @@ -477,7 +490,9 @@ TEST(GetAllMatchingHeaderNames, SingleMatchSingleValue) { envoy::type::matcher::v3::StringMatcher matcher; matcher.set_exact("accept"); - ruleset.emplace_back(std::make_unique(matcher)); + ruleset.emplace_back( + std::make_unique>( + matcher)); CacheHeadersUtils::getAllMatchingHeaderNames(headers, ruleset, result); @@ -492,7 +507,9 @@ TEST(GetAllMatchingHeaderNames, SingleMatchMultiValue) { envoy::type::matcher::v3::StringMatcher matcher; matcher.set_exact("accept"); - ruleset.emplace_back(std::make_unique(matcher)); + ruleset.emplace_back( + std::make_unique>( + matcher)); CacheHeadersUtils::getAllMatchingHeaderNames(headers, ruleset, result); @@ -507,9 +524,13 @@ TEST(GetAllMatchingHeaderNames, MultipleMatches) { envoy::type::matcher::v3::StringMatcher matcher; matcher.set_exact("accept"); - ruleset.emplace_back(std::make_unique(matcher)); + ruleset.emplace_back( + std::make_unique>( + matcher)); matcher.set_exact("accept-language"); - ruleset.emplace_back(std::make_unique(matcher)); + ruleset.emplace_back( + std::make_unique>( + matcher)); CacheHeadersUtils::getAllMatchingHeaderNames(headers, ruleset, result); @@ -518,163 +539,269 @@ TEST(GetAllMatchingHeaderNames, MultipleMatches) { EXPECT_TRUE(result.contains("accept-language")); } -TEST(ParseCommaDelimitedList, Null) { - Http::TestResponseHeaderMapImpl headers; - std::vector result = - CacheHeadersUtils::parseCommaDelimitedList(headers.get(Http::CustomHeaders::get().Vary)); +struct ParseCommaDelimitedHeaderTestCase { + absl::string_view name; + std::vector header_entries; + std::vector expected_values; +}; - 
EXPECT_EQ(result.size(), 0); +std::string getParseCommaDelimitedHeaderTestName( + const testing::TestParamInfo& info) { + return std::string(info.param.name); +} + +std::vector parseCommaDelimitedHeaderTestParams() { + return { + { + "Null", + {}, + {}, + }, + { + "Empty", + {}, + {}, + }, + { + "SingleValue", + {"accept"}, + {"accept"}, + }, + { + "MultiValue", + {"accept,accept-language"}, + {"accept", "accept-language"}, + }, + { + "MultiValueLeadingSpace", + {" accept,accept-language"}, + {"accept", "accept-language"}, + }, + { + "MultiValueSpaceAfterValue", + {"accept ,accept-language"}, + {"accept", "accept-language"}, + }, + { + "MultiValueTrailingSpace", + {"accept,accept-language "}, + {"accept", "accept-language"}, + }, + { + "MultiValueLotsOfSpaces", + {" accept , accept-language "}, + {"accept", "accept-language"}, + }, + { + "MultiEntry", + {"accept", "accept-language"}, + {"accept", "accept-language"}, + }, + { + "MultiEntryMultiValue", + {"accept,accept-language", "foo,bar"}, + {"accept", "accept-language", "foo", "bar"}, + }, + { + "MultiEntryMultiValueWithSpaces", + {"accept, accept-language ", "foo ,bar"}, + {"accept", "accept-language", "foo", "bar"}, + }, + }; +} + +class ParseCommaDelimitedHeaderTest + : public testing::TestWithParam {}; + +INSTANTIATE_TEST_SUITE_P(ParseCommaDelimitedHeaderTest, ParseCommaDelimitedHeaderTest, + testing::ValuesIn(parseCommaDelimitedHeaderTestParams()), + getParseCommaDelimitedHeaderTestName); + +TEST_P(ParseCommaDelimitedHeaderTest, ParseCommaDelimitedHeader) { + ParseCommaDelimitedHeaderTestCase test_case = GetParam(); + const Http::LowerCaseString header_name = Http::CustomHeaders::get().Vary; + Http::TestResponseHeaderMapImpl headers; + for (absl::string_view entry : test_case.header_entries) { + headers.addCopy(header_name, entry); + } + std::vector result = + CacheHeadersUtils::parseCommaDelimitedHeader(headers.get(header_name)); + std::vector expected(test_case.expected_values.begin(), + 
test_case.expected_values.end()); + EXPECT_EQ(result, expected); } -TEST(ParseCommaDelimitedList, Empty) { - Http::TestResponseHeaderMapImpl headers{{"vary", ""}}; - std::vector result = - CacheHeadersUtils::parseCommaDelimitedList(headers.get(Http::CustomHeaders::get().Vary)); +TEST(CreateVaryIdentifier, IsStableForAllowListOrder) { + VaryAllowList vary_allow_list1(toStringMatchers({"width", "accept", "accept-language"})); + VaryAllowList vary_allow_list2(toStringMatchers({"accept", "width", "accept-language"})); - EXPECT_EQ(result.size(), 1); - EXPECT_EQ(result[0], ""); -} + Http::TestRequestHeaderMapImpl request_headers{ + {"accept", "image/*"}, {"accept-language", "en-us"}, {"width", "640"}}; -TEST(ParseCommaDelimitedList, SingleValue) { - Http::TestResponseHeaderMapImpl headers{{"vary", "accept"}}; - std::vector result = - CacheHeadersUtils::parseCommaDelimitedList(headers.get(Http::CustomHeaders::get().Vary)); + absl::optional vary_identifier1 = VaryHeaderUtils::createVaryIdentifier( + vary_allow_list1, {"accept", "accept-language", "width"}, request_headers); + absl::optional vary_identifier2 = VaryHeaderUtils::createVaryIdentifier( + vary_allow_list2, {"accept", "accept-language", "width"}, request_headers); - EXPECT_EQ(result.size(), 1); - EXPECT_EQ(result[0], "accept"); + ASSERT_TRUE(vary_identifier1.has_value()); + ASSERT_TRUE(vary_identifier2.has_value()); + EXPECT_EQ(vary_identifier1.value(), vary_identifier2.value()); } -class ParseCommaDelimitedListMultipleTest : public testing::Test, - public testing::WithParamInterface { -protected: - Http::TestResponseHeaderMapImpl headers{{"vary", GetParam()}}; -}; +TEST(GetVaryValues, noVary) { + Http::TestResponseHeaderMapImpl headers; + EXPECT_EQ(0, VaryHeaderUtils::getVaryValues(headers).size()); +} -INSTANTIATE_TEST_SUITE_P(MultipleValuesMixedSpaces, ParseCommaDelimitedListMultipleTest, - testing::Values("accept,accept-language", " accept,accept-language", - "accept ,accept-language", "accept, 
accept-language", - "accept,accept-language ", " accept, accept-language ", - " accept , accept-language ")); +TEST(GetVaryValues, emptyVary) { + Http::TestResponseHeaderMapImpl headers{{"vary", ""}}; + EXPECT_EQ(0, VaryHeaderUtils::getVaryValues(headers).size()); +} -TEST_P(ParseCommaDelimitedListMultipleTest, MultipleValuesMixedSpaces) { - std::vector result = - CacheHeadersUtils::parseCommaDelimitedList(headers.get(Http::CustomHeaders::get().Vary)); - EXPECT_EQ(result.size(), 2); - EXPECT_EQ(result[0], "accept"); - EXPECT_EQ(result[1], "accept-language"); +TEST(GetVaryValues, singleVary) { + Http::TestResponseHeaderMapImpl headers{{"vary", "accept"}}; + absl::btree_set result_set = VaryHeaderUtils::getVaryValues(headers); + std::vector result(result_set.begin(), result_set.end()); + std::vector expected = {"accept"}; + EXPECT_EQ(expected, result); +} + +TEST(GetVaryValues, multipleVaryAllowLists) { + Http::TestResponseHeaderMapImpl headers{{"vary", "accept"}, {"vary", "origin"}}; + absl::btree_set result_set = VaryHeaderUtils::getVaryValues(headers); + std::vector result(result_set.begin(), result_set.end()); + std::vector expected = {"accept", "origin"}; + EXPECT_EQ(expected, result); } TEST(HasVary, Null) { Http::TestResponseHeaderMapImpl headers; - ASSERT_FALSE(VaryHeader::hasVary(headers)); + EXPECT_FALSE(VaryHeaderUtils::hasVary(headers)); } TEST(HasVary, Empty) { Http::TestResponseHeaderMapImpl headers{{"vary", ""}}; - ASSERT_FALSE(VaryHeader::hasVary(headers)); + EXPECT_FALSE(VaryHeaderUtils::hasVary(headers)); } TEST(HasVary, NotEmpty) { Http::TestResponseHeaderMapImpl headers{{"vary", "accept"}}; - ASSERT_TRUE(VaryHeader::hasVary(headers)); + EXPECT_TRUE(VaryHeaderUtils::hasVary(headers)); } -TEST(CreateVaryKey, EmptyVaryEntry) { - Http::TestResponseHeaderMapImpl response_headers{{"vary", ""}}; +TEST(CreateVaryIdentifier, EmptyVaryEntry) { Http::TestRequestHeaderMapImpl request_headers{{"accept", "image/*"}}; + VaryAllowList 
vary_allow_list(toStringMatchers({"accept", "accept-language", "width"})); - ASSERT_EQ(VaryHeader::createVaryKey(response_headers.get(Http::CustomHeaders::get().Vary), - request_headers), - "vary-key\n\r\n"); + EXPECT_EQ(VaryHeaderUtils::createVaryIdentifier(vary_allow_list, {}, request_headers), + "vary-id\n"); } -TEST(CreateVaryKey, SingleHeaderExists) { - Http::TestResponseHeaderMapImpl response_headers{{"vary", "accept"}}; +TEST(CreateVaryIdentifier, SingleHeaderExists) { Http::TestRequestHeaderMapImpl request_headers{{"accept", "image/*"}}; + VaryAllowList vary_allow_list(toStringMatchers({"accept", "accept-language", "width"})); - ASSERT_EQ(VaryHeader::createVaryKey(response_headers.get(Http::CustomHeaders::get().Vary), - request_headers), - "vary-key\naccept\r" + EXPECT_EQ(VaryHeaderUtils::createVaryIdentifier(vary_allow_list, {"accept"}, request_headers), + "vary-id\naccept\r" "image/*\n"); } -TEST(CreateVaryKey, SingleHeaderMissing) { - Http::TestResponseHeaderMapImpl response_headers{{"vary", "accept"}}; +TEST(CreateVaryIdentifier, SingleHeaderMissing) { Http::TestRequestHeaderMapImpl request_headers; + VaryAllowList vary_allow_list(toStringMatchers({"accept", "accept-language", "width"})); - ASSERT_EQ(VaryHeader::createVaryKey(response_headers.get(Http::CustomHeaders::get().Vary), - request_headers), - "vary-key\naccept\r\n"); + EXPECT_EQ(VaryHeaderUtils::createVaryIdentifier(vary_allow_list, {"accept"}, request_headers), + "vary-id\naccept\r\n"); } -TEST(CreateVaryKey, MultipleHeadersAllExist) { - Http::TestResponseHeaderMapImpl response_headers{{"vary", "accept, accept-language, width"}}; +TEST(CreateVaryIdentifier, MultipleHeadersAllExist) { Http::TestRequestHeaderMapImpl request_headers{ {"accept", "image/*"}, {"accept-language", "en-us"}, {"width", "640"}}; + VaryAllowList vary_allow_list(toStringMatchers({"accept", "accept-language", "width"})); - ASSERT_EQ(VaryHeader::createVaryKey(response_headers.get(Http::CustomHeaders::get().Vary), - 
request_headers), - "vary-key\naccept\r" + EXPECT_EQ(VaryHeaderUtils::createVaryIdentifier( + vary_allow_list, {"accept", "accept-language", "width"}, request_headers), + "vary-id\naccept\r" "image/*\naccept-language\r" "en-us\nwidth\r640\n"); } -TEST(CreateVaryKey, MultipleHeadersSomeExist) { +TEST(CreateVaryIdentifier, MultipleHeadersSomeExist) { Http::TestResponseHeaderMapImpl response_headers{{"vary", "accept, accept-language, width"}}; Http::TestRequestHeaderMapImpl request_headers{{"accept", "image/*"}, {"width", "640"}}; + VaryAllowList vary_allow_list(toStringMatchers({"accept", "accept-language", "width"})); - ASSERT_EQ(VaryHeader::createVaryKey(response_headers.get(Http::CustomHeaders::get().Vary), - request_headers), - "vary-key\naccept\r" + EXPECT_EQ(VaryHeaderUtils::createVaryIdentifier( + vary_allow_list, {"accept", "accept-language", "width"}, request_headers), + "vary-id\naccept\r" "image/*\naccept-language\r\nwidth\r640\n"); } -TEST(CreateVaryKey, ExtraRequestHeaders) { - Http::TestResponseHeaderMapImpl response_headers{{"vary", "accept, width"}}; +TEST(CreateVaryIdentifier, ExtraRequestHeaders) { Http::TestRequestHeaderMapImpl request_headers{ {"accept", "image/*"}, {"heigth", "1280"}, {"width", "640"}}; + VaryAllowList vary_allow_list(toStringMatchers({"accept", "accept-language", "width"})); - ASSERT_EQ(VaryHeader::createVaryKey(response_headers.get(Http::CustomHeaders::get().Vary), - request_headers), - "vary-key\naccept\r" - "image/*\nwidth\r640\n"); + EXPECT_EQ( + VaryHeaderUtils::createVaryIdentifier(vary_allow_list, {"accept", "width"}, request_headers), + "vary-id\naccept\r" + "image/*\nwidth\r640\n"); } -TEST(CreateVaryKey, MultipleHeadersNoneExist) { - Http::TestResponseHeaderMapImpl response_headers{{"vary", "accept, accept-language, width"}}; +TEST(CreateVaryIdentifier, MultipleHeadersNoneExist) { Http::TestRequestHeaderMapImpl request_headers; + VaryAllowList vary_allow_list(toStringMatchers({"accept", "accept-language", "width"})); - 
ASSERT_EQ(VaryHeader::createVaryKey(response_headers.get(Http::CustomHeaders::get().Vary), - request_headers), - "vary-key\naccept\r\naccept-language\r\nwidth\r\n"); + EXPECT_EQ(VaryHeaderUtils::createVaryIdentifier( + vary_allow_list, {"accept", "accept-language", "width"}, request_headers), + "vary-id\naccept\r\naccept-language\r\nwidth\r\n"); } -TEST(CreateVaryKey, DifferentHeadersSameValue) { - // Two requests with the same value for different headers must have different vary-keys. - Http::TestResponseHeaderMapImpl response_headers{{"vary", "accept, accept-language"}}; +TEST(CreateVaryIdentifier, DifferentHeadersSameValue) { + // Two requests with the same value for different headers must have different + // vary-ids. + VaryAllowList vary_allow_list(toStringMatchers({"accept", "accept-language", "width"})); Http::TestRequestHeaderMapImpl request_headers1{{"accept", "foo"}}; - std::string vary_key1 = VaryHeader::createVaryKey( - response_headers.get(Http::CustomHeaders::get().Vary), request_headers1); + absl::optional vary_identifier1 = VaryHeaderUtils::createVaryIdentifier( + vary_allow_list, {"accept", "accept-language"}, request_headers1); Http::TestRequestHeaderMapImpl request_headers2{{"accept-language", "foo"}}; - std::string vary_key2 = VaryHeader::createVaryKey( - response_headers.get(Http::CustomHeaders::get().Vary), request_headers2); + absl::optional vary_identifier2 = VaryHeaderUtils::createVaryIdentifier( + vary_allow_list, {"accept", "accept-language", "width"}, request_headers2); - ASSERT_NE(vary_key1, vary_key2); + ASSERT_TRUE(vary_identifier1.has_value()); + ASSERT_TRUE(vary_identifier2.has_value()); + EXPECT_NE(vary_identifier1.value(), vary_identifier2.value()); } -TEST(CreateVaryKey, MultiValueSameHeader) { - Http::TestResponseHeaderMapImpl response_headers{{"vary", "width"}}; +TEST(CreateVaryIdentifier, MultiValueSameHeader) { Http::TestRequestHeaderMapImpl request_headers{{"width", "foo"}, {"width", "bar"}}; + VaryAllowList 
vary_allow_list(toStringMatchers({"accept", "accept-language", "width"})); - ASSERT_EQ(VaryHeader::createVaryKey(response_headers.get(Http::CustomHeaders::get().Vary), - request_headers), - "vary-key\nwidth\r" + EXPECT_EQ(VaryHeaderUtils::createVaryIdentifier(vary_allow_list, {"width"}, request_headers), + "vary-id\nwidth\r" "foo\r" "bar\n"); } +TEST(CreateVaryIdentifier, DisallowedHeader) { + Http::TestRequestHeaderMapImpl request_headers{{"width", "foo"}}; + VaryAllowList vary_allow_list(toStringMatchers({"accept", "accept-language", "width"})); + + EXPECT_EQ(VaryHeaderUtils::createVaryIdentifier(vary_allow_list, {"disallowed"}, request_headers), + absl::nullopt); +} + +TEST(CreateVaryIdentifier, DisallowedHeaderWithAllowedHeader) { + Http::TestRequestHeaderMapImpl request_headers{{"width", "foo"}}; + VaryAllowList vary_allow_list(toStringMatchers({"accept", "accept-language", "width"})); + + EXPECT_EQ( + VaryHeaderUtils::createVaryIdentifier(vary_allow_list, {"disallowed,width"}, request_headers), + absl::nullopt); +} + envoy::extensions::filters::http::cache::v3alpha::CacheConfig getConfig() { // Allows {accept, accept-language, width} to be varied in the tests. 
envoy::extensions::filters::http::cache::v3alpha::CacheConfig config; @@ -691,108 +818,58 @@ envoy::extensions::filters::http::cache::v3alpha::CacheConfig getConfig() { return config; } -class VaryHeaderTest : public testing::Test { +class VaryAllowListTest : public testing::Test { protected: - VaryHeaderTest() : vary_allow_list_(getConfig().allowed_vary_headers()) {} + VaryAllowListTest() : vary_allow_list_(getConfig().allowed_vary_headers()) {} - VaryHeader vary_allow_list_; + VaryAllowList vary_allow_list_; Http::TestRequestHeaderMapImpl request_headers_; Http::TestResponseHeaderMapImpl response_headers_; }; -TEST_F(VaryHeaderTest, IsAllowedNull) { - ASSERT_TRUE(vary_allow_list_.isAllowed(response_headers_)); +TEST_F(VaryAllowListTest, AllowsHeaderAccept) { + EXPECT_TRUE(vary_allow_list_.allowsValue("accept")); +} + +TEST_F(VaryAllowListTest, AllowsHeaderWrongHeader) { + EXPECT_FALSE(vary_allow_list_.allowsValue("wrong-header")); +} + +TEST_F(VaryAllowListTest, AllowsHeaderEmpty) { EXPECT_FALSE(vary_allow_list_.allowsValue("")); } + +TEST_F(VaryAllowListTest, AllowsHeadersNull) { + EXPECT_TRUE(vary_allow_list_.allowsHeaders(response_headers_)); } -TEST_F(VaryHeaderTest, IsAllowedEmpty) { +TEST_F(VaryAllowListTest, AllowsHeadersEmpty) { response_headers_.addCopy("vary", ""); - ASSERT_TRUE(vary_allow_list_.isAllowed(response_headers_)); + EXPECT_TRUE(vary_allow_list_.allowsHeaders(response_headers_)); } -TEST_F(VaryHeaderTest, IsAllowedSingle) { +TEST_F(VaryAllowListTest, AllowsHeadersSingle) { response_headers_.addCopy("vary", "accept"); - ASSERT_TRUE(vary_allow_list_.isAllowed(response_headers_)); + EXPECT_TRUE(vary_allow_list_.allowsHeaders(response_headers_)); } -TEST_F(VaryHeaderTest, IsAllowedMultiple) { +TEST_F(VaryAllowListTest, AllowsHeadersMultiple) { response_headers_.addCopy("vary", "accept"); - ASSERT_TRUE(vary_allow_list_.isAllowed(response_headers_)); + EXPECT_TRUE(vary_allow_list_.allowsHeaders(response_headers_)); } -TEST_F(VaryHeaderTest, 
NotIsAllowedStar) { +TEST_F(VaryAllowListTest, NotAllowsHeadersStar) { // Should never be allowed, regardless of the allow_list. response_headers_.addCopy("vary", "*"); - ASSERT_FALSE(vary_allow_list_.isAllowed(response_headers_)); + EXPECT_FALSE(vary_allow_list_.allowsHeaders(response_headers_)); } -TEST_F(VaryHeaderTest, NotIsAllowedSingle) { +TEST_F(VaryAllowListTest, NotAllowsHeadersSingle) { response_headers_.addCopy("vary", "wrong-header"); - ASSERT_FALSE(vary_allow_list_.isAllowed(response_headers_)); + EXPECT_FALSE(vary_allow_list_.allowsHeaders(response_headers_)); } -TEST_F(VaryHeaderTest, NotIsAllowedMixed) { +TEST_F(VaryAllowListTest, NotAllowsHeadersMixed) { response_headers_.addCopy("vary", "accept, wrong-header"); - ASSERT_FALSE(vary_allow_list_.isAllowed(response_headers_)); -} - -TEST_F(VaryHeaderTest, PossibleVariedHeadersEmpty) { - Http::HeaderMapPtr result = vary_allow_list_.possibleVariedHeaders(request_headers_); - - EXPECT_TRUE(result->get(Http::LowerCaseString("accept")).empty()); - EXPECT_TRUE(result->get(Http::LowerCaseString("accept-language")).empty()); - EXPECT_TRUE(result->get(Http::LowerCaseString("width")).empty()); -} - -TEST_F(VaryHeaderTest, PossibleVariedHeadersNoOverlap) { - request_headers_.addCopy("abc", "123"); - Http::HeaderMapPtr result = vary_allow_list_.possibleVariedHeaders(request_headers_); - - EXPECT_TRUE(result->get(Http::LowerCaseString("accept")).empty()); - EXPECT_TRUE(result->get(Http::LowerCaseString("accept-language")).empty()); - EXPECT_TRUE(result->get(Http::LowerCaseString("width")).empty()); -} - -TEST_F(VaryHeaderTest, PossibleVariedHeadersOverlap) { - request_headers_.addCopy("abc", "123"); - request_headers_.addCopy("accept", "image/*"); - Http::HeaderMapPtr result = vary_allow_list_.possibleVariedHeaders(request_headers_); - - const auto values = result->get(Http::LowerCaseString("accept")); - ASSERT_EQ(values.size(), 1); - EXPECT_EQ(values[0]->value().getStringView(), "image/*"); - - 
EXPECT_TRUE(result->get(Http::LowerCaseString("accept-language")).empty()); - EXPECT_TRUE(result->get(Http::LowerCaseString("width")).empty()); -} - -TEST_F(VaryHeaderTest, PossibleVariedHeadersMultiValues) { - request_headers_.addCopy("accept", "image/*"); - request_headers_.addCopy("accept", "text/html"); - Http::HeaderMapPtr result = vary_allow_list_.possibleVariedHeaders(request_headers_); - - const auto values = result->get(Http::LowerCaseString("accept")); - ASSERT_EQ(values.size(), 2); - EXPECT_EQ(values[0]->value().getStringView(), "image/*"); - EXPECT_EQ(values[1]->value().getStringView(), "text/html"); - - EXPECT_TRUE(result->get(Http::LowerCaseString("accept-language")).empty()); - EXPECT_TRUE(result->get(Http::LowerCaseString("width")).empty()); -} - -TEST_F(VaryHeaderTest, PossibleVariedHeadersMultiHeaders) { - request_headers_.addCopy("accept", "image/*"); - request_headers_.addCopy("accept-language", "en-US"); - Http::HeaderMapPtr result = vary_allow_list_.possibleVariedHeaders(request_headers_); - - const auto values = result->get(Http::LowerCaseString("accept")); - ASSERT_EQ(values.size(), 1); - EXPECT_EQ(values[0]->value().getStringView(), "image/*"); - - const auto values2 = result->get(Http::LowerCaseString("accept-language")); - ASSERT_EQ(values2.size(), 1); - EXPECT_EQ(values2[0]->value(), "en-US"); - - EXPECT_TRUE(result->get(Http::LowerCaseString("width")).empty()); + EXPECT_FALSE(vary_allow_list_.allowsHeaders(response_headers_)); } } // namespace diff --git a/test/extensions/filters/http/cache/cacheability_utils_test.cc b/test/extensions/filters/http/cache/cacheability_utils_test.cc index 7c208861712dd..18ad3ebba33e0 100644 --- a/test/extensions/filters/http/cache/cacheability_utils_test.cc +++ b/test/extensions/filters/http/cache/cacheability_utils_test.cc @@ -42,7 +42,7 @@ class IsCacheableResponseTest : public testing::Test { Http::TestResponseHeaderMapImpl response_headers_ = {{":status", "200"}, {"date", "Sun, 06 Nov 1994 08:49:37 
GMT"}, {"cache-control", cache_control_}}; - VaryHeader vary_allow_list_; + VaryAllowList vary_allow_list_; }; TEST_F(CanServeRequestFromCacheTest, CacheableRequest) { diff --git a/test/extensions/filters/http/cache/http_cache_test.cc b/test/extensions/filters/http/cache/http_cache_test.cc index 3ffc8089b47ec..5cf24b047d122 100644 --- a/test/extensions/filters/http/cache/http_cache_test.cc +++ b/test/extensions/filters/http/cache/http_cache_test.cc @@ -46,7 +46,7 @@ class LookupRequestTest : public testing::TestWithParam { Http::TestRequestHeaderMapImpl request_headers_{ {":path", "/"}, {":method", "GET"}, {":scheme", "https"}, {":authority", "example.com"}}; - VaryHeader vary_allow_list_; + VaryAllowList vary_allow_list_; static const SystemTime& currentTime() { CONSTRUCT_ON_FIRST_USE(SystemTime, Event::SimulatedTimeSystem().systemTime()); @@ -626,17 +626,6 @@ TEST_P(ParseInvalidRangeHeaderTest, InvalidRangeReturnsEmpty) { ASSERT_EQ(0, result_vector.size()); } -TEST_F(LookupRequestTest, VariedHeaders) { - request_headers_.addCopy("accept", "image/*"); - request_headers_.addCopy("other-header", "abc123"); - const LookupRequest lookup_request(request_headers_, currentTime(), vary_allow_list_); - const Http::RequestHeaderMap& result = lookup_request.getVaryHeaders(); - - ASSERT_FALSE(result.get(Http::LowerCaseString("accept")).empty()); - ASSERT_EQ(result.get(Http::LowerCaseString("accept"))[0]->value().getStringView(), "image/*"); - ASSERT_TRUE(result.get(Http::LowerCaseString("other-header")).empty()); -} - } // namespace } // namespace Cache } // namespace HttpFilters diff --git a/test/extensions/filters/http/cache/simple_http_cache/simple_http_cache_test.cc b/test/extensions/filters/http/cache/simple_http_cache/simple_http_cache_test.cc index eaad85d95d162..76e2d1dc2e538 100644 --- a/test/extensions/filters/http/cache/simple_http_cache/simple_http_cache_test.cc +++ b/test/extensions/filters/http/cache/simple_http_cache/simple_http_cache_test.cc @@ -36,6 +36,19 @@ 
class SimpleHttpCacheTest : public testing::Test { request_headers_.setCopy(Http::CustomHeaders::get().CacheControl, "max-age=3600"); } + // Updates the cache entry's header + void updateHeaders(LookupContextPtr lookup, + const Http::TestResponseHeaderMapImpl& response_headers, + const ResponseMetadata& metadata) { + cache_.updateHeaders(*lookup, response_headers, metadata); + } + + void updateHeaders(absl::string_view request_path, + const Http::TestResponseHeaderMapImpl& response_headers, + const ResponseMetadata& metadata) { + updateHeaders(lookup(request_path), response_headers, metadata); + } + // Performs a cache lookup. LookupContextPtr lookup(absl::string_view request_path) { LookupRequest request = makeLookupRequest(request_path); @@ -71,6 +84,16 @@ class SimpleHttpCacheTest : public testing::Test { return body; } + Http::ResponseHeaderMapPtr getHeaders(LookupContext& context) { + Http::ResponseHeaderMapPtr response_headers_ptr; + context.getHeaders([&response_headers_ptr](LookupResult&& lookup_result) { + EXPECT_NE(lookup_result.cache_entry_status_, CacheEntryStatus::Unusable); + EXPECT_NE(lookup_result.headers_, nullptr); + response_headers_ptr = move(lookup_result.headers_); + }); + return response_headers_ptr; + } + LookupRequest makeLookupRequest(absl::string_view request_path) { request_headers_.setPath(request_path); return LookupRequest(request_headers_, current_time_, vary_allow_list_); @@ -96,19 +119,41 @@ class SimpleHttpCacheTest : public testing::Test { return AssertionSuccess(); } + AssertionResult expectLookupSuccessWithHeaders(LookupContext* lookup_context, + const Http::TestResponseHeaderMapImpl& headers) { + if (lookup_result_.cache_entry_status_ != CacheEntryStatus::Ok) { + return AssertionFailure() << "Expected: lookup_result_.cache_entry_status == " + "CacheEntryStatus::Ok\n Actual: " + << lookup_result_.cache_entry_status_; + } + if (!lookup_result_.headers_) { + return AssertionFailure() << "Expected nonnull lookup_result_.headers"; 
+ } + if (!lookup_context) { + return AssertionFailure() << "Expected nonnull lookup_context"; + } + + Http::ResponseHeaderMapPtr actual_headers_ptr = getHeaders(*lookup_context); + if (!TestUtility::headerMapEqualIgnoreOrder(headers, *actual_headers_ptr)) { + return AssertionFailure() << "Expected headers: " << headers + << "\nActual: " << *actual_headers_ptr; + } + return AssertionSuccess(); + } + SimpleHttpCache cache_; LookupResult lookup_result_; Http::TestRequestHeaderMapImpl request_headers_; Event::SimulatedTimeSystem time_source_; SystemTime current_time_ = time_source_.systemTime(); DateFormatter formatter_{"%a, %d %b %Y %H:%M:%S GMT"}; - VaryHeader vary_allow_list_; + VaryAllowList vary_allow_list_; }; // Simple flow of putting in an item, getting it, deleting it. TEST_F(SimpleHttpCacheTest, PutGet) { - const std::string RequestPath1("Name"); - LookupContextPtr name_lookup_context = lookup(RequestPath1); + const std::string request_path_1("/name"); + LookupContextPtr name_lookup_context = lookup(request_path_1); EXPECT_EQ(CacheEntryStatus::Unusable, lookup_result_.cache_entry_status_); Http::TestResponseHeaderMapImpl response_headers{{"date", formatter_.fromTime(current_time_)}, @@ -116,7 +161,7 @@ TEST_F(SimpleHttpCacheTest, PutGet) { const std::string Body1("Value"); insert(move(name_lookup_context), response_headers, Body1); - name_lookup_context = lookup(RequestPath1); + name_lookup_context = lookup(request_path_1); EXPECT_TRUE(expectLookupSuccessWithBody(name_lookup_context.get(), Body1)); const std::string& RequestPath2("Another Name"); @@ -125,14 +170,14 @@ TEST_F(SimpleHttpCacheTest, PutGet) { const std::string NewBody1("NewValue"); insert(move(name_lookup_context), response_headers, NewBody1); - EXPECT_TRUE(expectLookupSuccessWithBody(lookup(RequestPath1).get(), NewBody1)); + EXPECT_TRUE(expectLookupSuccessWithBody(lookup(request_path_1).get(), NewBody1)); } TEST_F(SimpleHttpCacheTest, PrivateResponse) { Http::TestResponseHeaderMapImpl 
response_headers{{"date", formatter_.fromTime(current_time_)}, {"age", "2"}, {"cache-control", "private,max-age=3600"}}; - const std::string request_path("Name"); + const std::string request_path("/name"); LookupContextPtr name_lookup_context = lookup(request_path); EXPECT_EQ(CacheEntryStatus::Unusable, lookup_result_.cache_entry_status_); @@ -146,7 +191,7 @@ TEST_F(SimpleHttpCacheTest, PrivateResponse) { } TEST_F(SimpleHttpCacheTest, Miss) { - LookupContextPtr name_lookup_context = lookup("Name"); + LookupContextPtr name_lookup_context = lookup("/name"); EXPECT_EQ(CacheEntryStatus::Unusable, lookup_result_.cache_entry_status_); } @@ -172,7 +217,7 @@ TEST_F(SimpleHttpCacheTest, Stale) { TEST_F(SimpleHttpCacheTest, RequestSmallMinFresh) { request_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, "min-fresh=1000"); - const std::string request_path("Name"); + const std::string request_path("/name"); LookupContextPtr name_lookup_context = lookup(request_path); EXPECT_EQ(CacheEntryStatus::Unusable, lookup_result_.cache_entry_status_); @@ -187,7 +232,7 @@ TEST_F(SimpleHttpCacheTest, RequestSmallMinFresh) { TEST_F(SimpleHttpCacheTest, ResponseStaleWithRequestLargeMaxStale) { request_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, "max-stale=9000"); - const std::string request_path("Name"); + const std::string request_path("/name"); LookupContextPtr name_lookup_context = lookup(request_path); EXPECT_EQ(CacheEntryStatus::Unusable, lookup_result_.cache_entry_status_); @@ -252,8 +297,79 @@ TEST_F(SimpleHttpCacheTest, VaryResponses) { insert(move(second_value_vary), response_headers, Body2); EXPECT_TRUE(expectLookupSuccessWithBody(lookup(RequestPath).get(), Body2)); + request_headers_.setCopy(Http::LowerCaseString("accept"), "image/*"); + LookupContextPtr first_value_lookup2 = lookup(RequestPath); // Looks up first version again to be sure it wasn't replaced with the second one. 
- EXPECT_TRUE(expectLookupSuccessWithBody(first_value_vary.get(), Body1)); + EXPECT_TRUE(expectLookupSuccessWithBody(first_value_lookup2.get(), Body1)); + + // Create a new allow list to make sure a now disallowed cached vary entry is not served. + Protobuf::RepeatedPtrField<::envoy::type::matcher::v3::StringMatcher> proto_allow_list; + ::envoy::type::matcher::v3::StringMatcher* matcher = proto_allow_list.Add(); + matcher->set_exact("width"); + vary_allow_list_ = VaryAllowList(proto_allow_list); + lookup(RequestPath); + EXPECT_EQ(CacheEntryStatus::Unusable, lookup_result_.cache_entry_status_); +} + +TEST_F(SimpleHttpCacheTest, VaryOnDisallowedKey) { + // Responses will vary on accept. + const std::string RequestPath("some-resource"); + Http::TestResponseHeaderMapImpl response_headers{{"date", formatter_.fromTime(current_time_)}, + {"cache-control", "public,max-age=3600"}, + {"vary", "user-agent"}}; + + // First request. + request_headers_.setCopy(Http::LowerCaseString("user-agent"), "user_agent_one"); + LookupContextPtr first_value_vary = lookup(RequestPath); + EXPECT_EQ(CacheEntryStatus::Unusable, lookup_result_.cache_entry_status_); + const std::string Body1("one"); + insert(move(first_value_vary), response_headers, Body1); + first_value_vary = lookup(RequestPath); + EXPECT_EQ(CacheEntryStatus::Unusable, lookup_result_.cache_entry_status_); +} + +TEST_F(SimpleHttpCacheTest, UpdateHeadersAndMetadata) { + const std::string request_path_1("/name"); + Http::TestResponseHeaderMapImpl response_headers{{"date", formatter_.fromTime(current_time_)}, + {"cache-control", "public,max-age=3600"}}; + insert(request_path_1, response_headers, "body"); + EXPECT_TRUE(expectLookupSuccessWithHeaders(lookup(request_path_1).get(), response_headers)); + + // Update the date field in the headers + time_source_.advanceTimeWait(Seconds(3601)); + + response_headers = Http::TestResponseHeaderMapImpl{{"date", formatter_.fromTime(current_time_)}, + {"cache-control", "public,max-age=3600"}}; + 
updateHeaders(request_path_1, response_headers, {current_time_}); + EXPECT_TRUE(expectLookupSuccessWithHeaders(lookup(request_path_1).get(), response_headers)); +} + +TEST_F(SimpleHttpCacheTest, UpdateHeadersForMissingKey) { + const std::string request_path_1("/name"); + Http::TestResponseHeaderMapImpl response_headers{{"date", formatter_.fromTime(current_time_)}, + {"cache-control", "public,max-age=3600"}}; + updateHeaders(request_path_1, response_headers, {current_time_}); + EXPECT_EQ(CacheEntryStatus::Unusable, lookup_result_.cache_entry_status_); +} + +TEST_F(SimpleHttpCacheTest, UpdateHeadersDisabledForVaryHeaders) { + const std::string request_path_1("/name"); + Http::TestResponseHeaderMapImpl response_headers_1{{"date", formatter_.fromTime(current_time_)}, + {"cache-control", "public,max-age=3600"}, + {"accept", "image/*"}, + {"vary", "accept"}}; + insert(request_path_1, response_headers_1, "body"); + EXPECT_TRUE(expectLookupSuccessWithHeaders(lookup(request_path_1).get(), response_headers_1)); + + // Update the date field in the headers + time_source_.advanceTimeWait(Seconds(3601)); + Http::TestResponseHeaderMapImpl response_headers_2{{"date", formatter_.fromTime(current_time_)}, + {"cache-control", "public,max-age=3600"}, + {"accept", "image/*"}, + {"vary", "accept"}}; + updateHeaders(request_path_1, response_headers_2, {current_time_}); + + EXPECT_TRUE(expectLookupSuccessWithHeaders(lookup(request_path_1).get(), response_headers_1)); } } // namespace diff --git a/test/extensions/filters/http/common/fuzz/BUILD b/test/extensions/filters/http/common/fuzz/BUILD index ad1be79cf3413..04bc4579103fe 100644 --- a/test/extensions/filters/http/common/fuzz/BUILD +++ b/test/extensions/filters/http/common/fuzz/BUILD @@ -57,7 +57,6 @@ envoy_cc_test_library( "//test/test_common:test_runtime_lib", "@envoy_api//envoy/extensions/filters/http/grpc_json_transcoder/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto", - 
"@envoy_api//envoy/extensions/filters/http/squash/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/http/tap/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/filters/http/common/fuzz/uber_filter.cc b/test/extensions/filters/http/common/fuzz/uber_filter.cc index 748c84b311cc6..7a4c5e0a915ea 100644 --- a/test/extensions/filters/http/common/fuzz/uber_filter.cc +++ b/test/extensions/filters/http/common/fuzz/uber_filter.cc @@ -1,7 +1,6 @@ #include "test/extensions/filters/http/common/fuzz/uber_filter.h" #include "source/common/config/utility.h" -#include "source/common/config/version_converter.h" #include "source/common/http/message_impl.h" #include "source/common/http/utility.h" #include "source/common/protobuf/protobuf.h" diff --git a/test/extensions/filters/http/common/fuzz/uber_per_filter.cc b/test/extensions/filters/http/common/fuzz/uber_per_filter.cc index ce6720a2d2bf2..366bcacb5b245 100644 --- a/test/extensions/filters/http/common/fuzz/uber_per_filter.cc +++ b/test/extensions/filters/http/common/fuzz/uber_per_filter.cc @@ -1,6 +1,5 @@ #include "envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.pb.h" #include "envoy/extensions/filters/http/jwt_authn/v3/config.pb.h" -#include "envoy/extensions/filters/http/squash/v3/squash.pb.h" #include "envoy/extensions/filters/http/tap/v3/tap.pb.h" #include "source/common/tracing/http_tracer_impl.h" @@ -74,17 +73,6 @@ void UberFilterFuzzer::guideAnyProtoType(test::fuzz::HttpData* mutable_data, uin mutable_any->set_type_url(type_url); } -void cleanAttachmentTemplate(Protobuf::Message* message) { - envoy::extensions::filters::http::squash::v3::Squash& config = - dynamic_cast(*message); - std::string json; - Protobuf::util::JsonPrintOptions json_options; - if (!Protobuf::util::MessageToJsonString(config.attachment_template(), &json, json_options) - .ok()) { - config.clear_attachment_template(); - } -} - void cleanTapConfig(Protobuf::Message* message) { envoy::extensions::filters::http::tap::v3::Tap& config 
= dynamic_cast(*message); @@ -111,8 +99,6 @@ void UberFilterFuzzer::cleanFuzzedConfig(absl::string_view filter_name, if (filter_name == "envoy.filters.http.grpc_json_transcoder") { // Add a valid service proto descriptor. addBookstoreProtoDescriptor(message); - } else if (name == "envoy.filters.http.squash") { - cleanAttachmentTemplate(message); } else if (name == "envoy.filters.http.tap") { // TapDS oneof field and OutputSinkType StreamingGrpc not implemented cleanTapConfig(message); @@ -122,8 +108,8 @@ void UberFilterFuzzer::cleanFuzzedConfig(absl::string_view filter_name, void UberFilterFuzzer::perFilterSetup() { // Prepare expectations for the ext_authz filter. addr_ = std::make_shared("1.2.3.4", 1111); - connection_.stream_info_.downstream_address_provider_->setRemoteAddress(addr_); - connection_.stream_info_.downstream_address_provider_->setLocalAddress(addr_); + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress(addr_); + connection_.stream_info_.downstream_connection_info_provider_->setLocalAddress(addr_); ON_CALL(factory_context_, clusterManager()).WillByDefault(testing::ReturnRef(cluster_manager_)); ON_CALL(cluster_manager_.thread_local_cluster_.async_client_, send_(_, _, _)) .WillByDefault(Return(&async_request_)); diff --git a/test/extensions/filters/http/composite/composite_filter_integration_test.cc b/test/extensions/filters/http/composite/composite_filter_integration_test.cc index b639e8cf3a7b5..cd7ffec42e28d 100644 --- a/test/extensions/filters/http/composite/composite_filter_integration_test.cc +++ b/test/extensions/filters/http/composite/composite_filter_integration_test.cc @@ -24,7 +24,7 @@ class CompositeFilterIntegrationTest : public testing::TestWithParam(config); + return std::make_unique>( + config); } Matchers::StringMatcherPtr makeStdRegexStringMatcher(const std::string& regex) { envoy::type::matcher::v3::StringMatcher config; config.MergeFrom(TestUtility::createRegexMatcher(regex)); - return 
std::make_unique(config); + return std::make_unique>( + config); } } // namespace diff --git a/test/extensions/filters/http/csrf/csrf_filter_test.cc b/test/extensions/filters/http/csrf/csrf_filter_test.cc index 0db5cd8376fe6..489ee5b205765 100644 --- a/test/extensions/filters/http/csrf/csrf_filter_test.cc +++ b/test/extensions/filters/http/csrf/csrf_filter_test.cc @@ -59,12 +59,12 @@ class CsrfFilterTest : public testing::Test { } void setRoutePolicy(const CsrfPolicy* policy) { - ON_CALL(decoder_callbacks_.route_->route_entry_, perFilterConfig(filter_name_)) + ON_CALL(*decoder_callbacks_.route_, mostSpecificPerFilterConfig(filter_name_)) .WillByDefault(Return(policy)); } void setVirtualHostPolicy(const CsrfPolicy* policy) { - ON_CALL(decoder_callbacks_.route_->route_entry_, perFilterConfig(filter_name_)) + ON_CALL(*decoder_callbacks_.route_, mostSpecificPerFilterConfig(filter_name_)) .WillByDefault(Return(policy)); } diff --git a/test/extensions/filters/http/dynamic_forward_proxy/BUILD b/test/extensions/filters/http/dynamic_forward_proxy/BUILD index 510387a9f8e0c..ec2a0c9534eda 100644 --- a/test/extensions/filters/http/dynamic_forward_proxy/BUILD +++ b/test/extensions/filters/http/dynamic_forward_proxy/BUILD @@ -55,6 +55,7 @@ envoy_extension_cc_test( deps = [ "//source/extensions/clusters/dynamic_forward_proxy:cluster", "//source/extensions/filters/http/dynamic_forward_proxy:config", + "//source/extensions/key_value/file_based:config_lib", "//test/integration:http_integration_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", diff --git a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc index 297fb4f33538d..6b6ff51010c10 100644 --- a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc +++ 
b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc @@ -18,8 +18,9 @@ class ProxyFilterIntegrationTest : public testing::TestWithParamset_auto_sni(true); + protocol_options.mutable_upstream_http_protocol_options()->set_auto_san_validation(true); + protocol_options.mutable_explicit_http_config()->mutable_http_protocol_options(); + ConfigHelper::setProtocolOptions(cluster_, protocol_options); + if (upstream_tls_) { envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context; auto* validation_context = @@ -76,8 +89,14 @@ name: envoy.clusters.dynamic_forward_proxy max_hosts: {} dns_cache_circuit_breaker: max_pending_requests: {} + key_value_config: + config: + name: envoy.key_value.file_based + typed_config: + "@type": type.googleapis.com/envoy.extensions.key_value.file_based.v3.FileBasedKeyValueStoreConfig + filename: {} )EOF", - Network::Test::ipVersionToDnsFamily(GetParam()), max_hosts, max_pending_requests); + Network::Test::ipVersionToDnsFamily(GetParam()), max_hosts, max_pending_requests, filename); TestUtility::loadFromYaml(cluster_type_config, *cluster_.mutable_cluster_type()); cluster_.mutable_circuit_breakers() @@ -100,12 +119,20 @@ name: envoy.clusters.dynamic_forward_proxy } else { HttpIntegrationTest::createUpstreams(); } + if (write_cache_file_) { + std::string host = + fmt::format("localhost:{}", fake_upstreams_[0]->localAddress()->ip()->port()); + std::string value = fake_upstreams_[0]->localAddress()->asString(); + TestEnvironment::writeStringToFileForTest( + "dns_cache.txt", absl::StrCat(host.length(), "\n", host, value.length(), "\n", value)); + } } bool upstream_tls_{}; std::string upstream_cert_name_{"upstreamlocalhost"}; CdsHelper cds_helper_; envoy::config::cluster::v3::Cluster cluster_; + bool write_cache_file_{}; }; INSTANTIATE_TEST_SUITE_P(IpVersions, ProxyFilterIntegrationTest, @@ -115,7 +142,7 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, ProxyFilterIntegrationTest, // A basic test where we pause 
a request to lookup localhost, and then do another request which // should hit the TLS cache. TEST_P(ProxyFilterIntegrationTest, RequestWithBody) { - setup(); + initializeWithArgs(); codec_client_ = makeHttpConnection(lookupPort("http")); const Http::TestRequestHeaderMapImpl request_headers{ {":method", "POST"}, @@ -140,7 +167,7 @@ TEST_P(ProxyFilterIntegrationTest, RequestWithBody) { // Verify that after we populate the cache and reload the cluster we reattach to the cache with // its existing hosts. TEST_P(ProxyFilterIntegrationTest, ReloadClusterAndAttachToCache) { - setup(); + initializeWithArgs(); codec_client_ = makeHttpConnection(lookupPort("http")); const Http::TestRequestHeaderMapImpl request_headers{ {":method", "POST"}, @@ -176,7 +203,7 @@ TEST_P(ProxyFilterIntegrationTest, ReloadClusterAndAttachToCache) { // Verify that we expire hosts. TEST_P(ProxyFilterIntegrationTest, RemoveHostViaTTL) { - setup(); + initializeWithArgs(); codec_client_ = makeHttpConnection(lookupPort("http")); const Http::TestRequestHeaderMapImpl request_headers{ {":method", "POST"}, @@ -201,7 +228,7 @@ TEST_P(ProxyFilterIntegrationTest, RemoveHostViaTTL) { // Test DNS cache host overflow. TEST_P(ProxyFilterIntegrationTest, DNSCacheHostOverflow) { - setup(1); + initializeWithArgs(1); codec_client_ = makeHttpConnection(lookupPort("http")); const Http::TestRequestHeaderMapImpl request_headers{ @@ -230,7 +257,7 @@ TEST_P(ProxyFilterIntegrationTest, DNSCacheHostOverflow) { // Verify that upstream TLS works with auto verification for SAN as well as auto setting SNI. 
TEST_P(ProxyFilterIntegrationTest, UpstreamTls) { upstream_tls_ = true; - setup(); + initializeWithArgs(); codec_client_ = makeHttpConnection(lookupPort("http")); const Http::TestRequestHeaderMapImpl request_headers{ {":method", "POST"}, @@ -254,7 +281,7 @@ TEST_P(ProxyFilterIntegrationTest, UpstreamTls) { TEST_P(ProxyFilterIntegrationTest, UpstreamTlsWithIpHost) { upstream_tls_ = true; - setup(); + initializeWithArgs(); codec_client_ = makeHttpConnection(lookupPort("http")); const Http::TestRequestHeaderMapImpl request_headers{ {":method", "POST"}, @@ -280,7 +307,7 @@ TEST_P(ProxyFilterIntegrationTest, UpstreamTlsWithIpHost) { TEST_P(ProxyFilterIntegrationTest, UpstreamTlsInvalidSAN) { upstream_tls_ = true; upstream_cert_name_ = "upstream"; - setup(); + initializeWithArgs(); fake_upstreams_[0]->setReadDisableOnNewConnection(false); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -299,7 +326,7 @@ TEST_P(ProxyFilterIntegrationTest, UpstreamTlsInvalidSAN) { } TEST_P(ProxyFilterIntegrationTest, DnsCacheCircuitBreakersInvoked) { - setup(1024, 0); + initializeWithArgs(1024, 0); codec_client_ = makeHttpConnection(lookupPort("http")); const Http::TestRequestHeaderMapImpl request_headers{ @@ -317,5 +344,25 @@ TEST_P(ProxyFilterIntegrationTest, DnsCacheCircuitBreakersInvoked) { EXPECT_EQ("503", response->headers().Status()->value().getStringView()); } +#ifndef WIN32 +// TODO(alyssawilk) figure out why this test doesn't pass on windows. 
+TEST_P(ProxyFilterIntegrationTest, UseCacheFile) { + write_cache_file_ = true; + + initializeWithArgs(); + codec_client_ = makeHttpConnection(lookupPort("http")); + std::string host = fmt::format("localhost:{}", fake_upstreams_[0]->localAddress()->ip()->port()); + const Http::TestRequestHeaderMapImpl request_headers{ + {":method", "POST"}, {":path", "/test/long/url"}, {":scheme", "http"}, {":authority", host}}; + + auto response = + sendRequestAndWaitForResponse(request_headers, 1024, default_response_headers_, 1024); + checkSimpleRequestSuccess(1024, 1024, response.get()); + EXPECT_EQ(1, test_server_->counter("dns_cache.foo.cache_load")->value()); + EXPECT_EQ(1, test_server_->counter("dns_cache.foo.dns_query_attempt")->value()); + EXPECT_EQ(1, test_server_->counter("dns_cache.foo.host_added")->value()); +} +#endif + } // namespace } // namespace Envoy diff --git a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_test.cc b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_test.cc index 4a209304efef8..72a611f703a2a 100644 --- a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_test.cc +++ b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_test.cc @@ -286,8 +286,8 @@ TEST_F(ProxyFilterTest, HostRewrite) { EXPECT_CALL(*transport_socket_factory_, implementsSecureTransport()).WillOnce(Return(false)); Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle* handle = new Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle(); - EXPECT_CALL(callbacks_.route_->route_entry_, - perFilterConfig("envoy.filters.http.dynamic_forward_proxy")) + EXPECT_CALL(*callbacks_.route_, + mostSpecificPerFilterConfig("envoy.filters.http.dynamic_forward_proxy")) .WillOnce(Return(&config)); EXPECT_CALL(*dns_cache_manager_->dns_cache_, loadDnsCacheEntry_(Eq("bar"), 80, _)) .WillOnce(Return( @@ -315,8 +315,8 @@ TEST_F(ProxyFilterTest, HostRewriteViaHeader) { EXPECT_CALL(*transport_socket_factory_, 
implementsSecureTransport()).WillOnce(Return(false)); Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle* handle = new Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle(); - EXPECT_CALL(callbacks_.route_->route_entry_, - perFilterConfig("envoy.filters.http.dynamic_forward_proxy")) + EXPECT_CALL(*callbacks_.route_, + mostSpecificPerFilterConfig("envoy.filters.http.dynamic_forward_proxy")) .WillOnce(Return(&config)); EXPECT_CALL(*dns_cache_manager_->dns_cache_, loadDnsCacheEntry_(Eq("bar:82"), 80, _)) .WillOnce(Return( diff --git a/test/extensions/filters/http/ext_authz/BUILD b/test/extensions/filters/http/ext_authz/BUILD index 6cf4e0f99615f..6076c5d7b3cfe 100644 --- a/test/extensions/filters/http/ext_authz/BUILD +++ b/test/extensions/filters/http/ext_authz/BUILD @@ -32,6 +32,7 @@ envoy_extension_cc_test( "//test/mocks/runtime:runtime_mocks", "//test/mocks/tracing:tracing_mocks", "//test/mocks/upstream:cluster_manager_mocks", + "//test/test_common:test_runtime_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/http/ext_authz/v3:pkg_cc_proto", @@ -56,10 +57,15 @@ envoy_extension_cc_test( envoy_extension_cc_test( name = "ext_authz_integration_test", srcs = ["ext_authz_integration_test.cc"], + data = [ + "ext_authz.yaml", + ], extension_names = ["envoy.filters.http.ext_authz"], deps = [ "//source/extensions/filters/http/ext_authz:config", + "//source/server/config_validation:server_lib", "//test/integration:http_integration_lib", + "//test/mocks/server:options_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", diff --git a/test/extensions/filters/http/ext_authz/config_test.cc b/test/extensions/filters/http/ext_authz/config_test.cc index 8e74e5e37e19f..25f626a7747b8 100644 --- a/test/extensions/filters/http/ext_authz/config_test.cc +++ 
b/test/extensions/filters/http/ext_authz/config_test.cc @@ -21,54 +21,73 @@ namespace HttpFilters { namespace ExtAuthz { namespace { -void expectCorrectProtoGrpc(envoy::config::core::v3::ApiVersion api_version) { - std::unique_ptr _deprecated_v2_api; - if (api_version != envoy::config::core::v3::ApiVersion::V3) { - _deprecated_v2_api = std::make_unique(); - } - std::string yaml = R"EOF( - transport_api_version: V3 - grpc_service: - google_grpc: - target_uri: ext_authz_server - stat_prefix: google - failure_mode_allow: false - transport_api_version: {} - )EOF"; - +void expectCorrectProtoGrpc(std::string const& grpc_service_yaml) { ExtAuthzFilterConfig factory; ProtobufTypes::MessagePtr proto_config = factory.createEmptyConfigProto(); - TestUtility::loadFromYaml( - fmt::format(yaml, TestUtility::getVersionStringFromApiVersion(api_version)), *proto_config); + TestUtility::loadFromYaml(grpc_service_yaml, *proto_config); testing::StrictMock context; testing::StrictMock server_context; EXPECT_CALL(context, getServerFactoryContext()) .WillRepeatedly(testing::ReturnRef(server_context)); EXPECT_CALL(context, messageValidationVisitor()); - EXPECT_CALL(context, clusterManager()); + EXPECT_CALL(context, clusterManager()).Times(2); EXPECT_CALL(context, runtime()); - EXPECT_CALL(context, scope()).Times(2); + EXPECT_CALL(context, scope()).Times(3); + + Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(*proto_config, "stats", context); + Http::MockFilterChainFactoryCallbacks filter_callback; + EXPECT_CALL(filter_callback, addStreamFilter(_)); + // Expect the raw async client to be created inside the callback. + // The creation of the filter callback is in main thread while the execution of callback is in + // worker thread. Because of the thread local cache of async client, it must be created in worker + // thread inside the callback. 
EXPECT_CALL(context.cluster_manager_.async_client_manager_, getOrCreateRawAsyncClient(_, _, _, _)) .WillOnce(Invoke( [](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool, Grpc::CacheOption) { return std::make_unique>(); })); - - Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(*proto_config, "stats", context); - Http::MockFilterChainFactoryCallbacks filter_callback; - EXPECT_CALL(filter_callback, addStreamFilter(_)); cb(filter_callback); + + Thread::ThreadPtr thread = Thread::threadFactoryForTest().createThread([&context, cb]() { + Http::MockFilterChainFactoryCallbacks filter_callback; + EXPECT_CALL(filter_callback, addStreamFilter(_)); + // Execute the filter factory callback in another thread. + EXPECT_CALL(context.cluster_manager_.async_client_manager_, + getOrCreateRawAsyncClient(_, _, _, _)) + .WillOnce(Invoke( + [](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool, + Grpc::CacheOption) { return std::make_unique>(); })); + cb(filter_callback); + }); + thread->join(); } } // namespace -TEST(HttpExtAuthzConfigTest, CorrectProtoGrpc) { -#ifndef ENVOY_DISABLE_DEPRECATED_FEATURES - expectCorrectProtoGrpc(envoy::config::core::v3::ApiVersion::AUTO); - expectCorrectProtoGrpc(envoy::config::core::v3::ApiVersion::V2); -#endif - expectCorrectProtoGrpc(envoy::config::core::v3::ApiVersion::V3); +TEST(HttpExtAuthzConfigTest, CorrectProtoGoogleGrpc) { + std::string google_grpc_service_yaml = R"EOF( + transport_api_version: V3 + grpc_service: + google_grpc: + target_uri: ext_authz_server + stat_prefix: google + failure_mode_allow: false + transport_api_version: V3 + )EOF"; + expectCorrectProtoGrpc(google_grpc_service_yaml); +} + +TEST(HttpExtAuthzConfigTest, CorrectProtoEnvoyGrpc) { + std::string envoy_grpc_service_yaml = R"EOF( + transport_api_version: V3 + grpc_service: + envoy_grpc: + cluster_name: ext_authz_server + failure_mode_allow: false + transport_api_version: V3 + )EOF"; + expectCorrectProtoGrpc(envoy_grpc_service_yaml); 
} TEST(HttpExtAuthzConfigTest, CorrectProtoHttp) { diff --git a/test/extensions/filters/http/ext_authz/ext_authz.yaml b/test/extensions/filters/http/ext_authz/ext_authz.yaml new file mode 100644 index 0000000000000..e9c0877180ef0 --- /dev/null +++ b/test/extensions/filters/http/ext_authz/ext_authz.yaml @@ -0,0 +1,77 @@ +# Regression test for https://github.com/envoyproxy/envoy/issues/17344 +static_resources: + listeners: + - address: + socket_address: + address: 0.0.0.0 + port_value: 8080 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: ["*"] + routes: + - match: + prefix: "/" + route: + cluster: local_service + http_filters: + - name: envoy.ext_authz + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz + failure_mode_allow: false + transport_api_version: V3 + status_on_error: + code: 503 + grpc_service: + envoy_grpc: + cluster_name: ext_authz-service + timeout: 0.5s + with_request_body: + max_request_bytes: 10240 + allow_partial_message: true + pack_as_bytes: false + - name: envoy.filters.http.router + typed_config: {} + clusters: + - name: local_service + connect_timeout: 30s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: local_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: main + port_value: 8080 + - name: ext_authz-service + type: STRICT_DNS + lb_policy: ROUND_ROBIN + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + load_assignment: + cluster_name: ext_authz-service + endpoints: + - 
lb_endpoints: + - endpoint: + address: + socket_address: + address: opa + port_value: 80 +admin: + address: + socket_address: + address: 0.0.0.0 + port_value: 8081 diff --git a/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc b/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc index e5d41543b8548..5aa3d51953bfd 100644 --- a/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc +++ b/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc @@ -4,9 +4,11 @@ #include "envoy/service/auth/v3/external_auth.pb.h" #include "source/common/common/macros.h" +#include "source/server/config_validation/server.h" #include "test/common/grpc/grpc_client_integration.h" #include "test/integration/http_integration.h" +#include "test/mocks/server/options.h" #include "test/test_common/utility.h" #include "absl/strings/str_format.h" @@ -21,7 +23,7 @@ namespace Envoy { using Headers = std::vector>; -class ExtAuthzGrpcIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, +class ExtAuthzGrpcIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, public HttpIntegrationTest { public: ExtAuthzGrpcIntegrationTest() : HttpIntegrationTest(Http::CodecType::HTTP1, ipVersion()) {} @@ -32,9 +34,6 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::VersionedGrpcClientIntegrationP } void initializeConfig(bool disable_with_metadata = false) { - if (apiVersion() != envoy::config::core::v3::ApiVersion::V3) { - config_helper_.enableDeprecatedV2Api(); - } config_helper_.addConfigModifier([this, disable_with_metadata]( envoy::config::bootstrap::v3::Bootstrap& bootstrap) { auto* ext_authz_cluster = bootstrap.mutable_static_resources()->add_clusters(); @@ -58,7 +57,7 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::VersionedGrpcClientIntegrationP } proto_config_.mutable_deny_at_disable()->set_runtime_key("envoy.ext_authz.deny_at_disable"); 
proto_config_.mutable_deny_at_disable()->mutable_default_value()->set_value(false); - proto_config_.set_transport_api_version(apiVersion()); + proto_config_.set_transport_api_version(envoy::config::core::v3::ApiVersion::V3); // Add labels and verify they are passed. std::map labels; @@ -101,8 +100,10 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::VersionedGrpcClientIntegrationP const Headers& headers_to_remove = Headers{}) { auto conn = makeClientConnection(lookupPort("http")); codec_client_ = makeHttpConnection(std::move(conn)); - Http::TestRequestHeaderMapImpl headers{ - {":method", "POST"}, {":path", "/test"}, {":scheme", "http"}, {":authority", "host"}}; + Http::TestRequestHeaderMapImpl headers{{":method", "POST"}, {":path", "/test"}, + {":scheme", "http"}, {":authority", "host"}, + {"x-duplicate", "one"}, {"x-duplicate", "two"}, + {"x-duplicate", "three"}}; // Initialize headers to append. If the authorization server returns any matching keys with one // of value in headers_to_add, the header entry from authorization server replaces the one in @@ -140,8 +141,7 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::VersionedGrpcClientIntegrationP RELEASE_ASSERT(result, result.message()); EXPECT_EQ("POST", ext_authz_request_->headers().getMethodValue()); - EXPECT_EQ(TestUtility::getVersionedMethodPath("envoy.service.auth.{}.Authorization", "Check", - apiVersion()), + EXPECT_EQ("/envoy.service.auth.v3.Authorization/Check", ext_authz_request_->headers().getPathValue()); EXPECT_EQ("application/grpc", ext_authz_request_->headers().getContentTypeValue()); @@ -155,6 +155,9 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::VersionedGrpcClientIntegrationP EXPECT_EQ("value_1", attributes->destination().labels().at("label_1")); EXPECT_EQ("value_2", attributes->destination().labels().at("label_2")); + // Duplicate headers in the check request should be merged. 
+ EXPECT_EQ("one,two,three", (*http_request->mutable_headers())["x-duplicate"]); + // Clear fields which are not relevant. attributes->clear_source(); attributes->clear_destination(); @@ -447,6 +450,9 @@ class ExtAuthzHttpIntegrationTest : public HttpIntegrationTest, {"baz", "foo"}, {"bat", "foo"}, {"remove-me", "upstream-should-not-see-me"}, + {"x-duplicate", "one"}, + {"x-duplicate", "two"}, + {"x-duplicate", "three"}, }); } @@ -459,6 +465,12 @@ class ExtAuthzHttpIntegrationTest : public HttpIntegrationTest, result = ext_authz_request_->waitForEndStream(*dispatcher_); RELEASE_ASSERT(result, result.message()); + // Duplicate headers in the check request should be merged. + const auto duplicate = + ext_authz_request_->headers().get(Http::LowerCaseString(std::string("x-duplicate"))); + EXPECT_EQ(1, duplicate.size()); + EXPECT_EQ("one,two,three", duplicate[0]->value().getStringView()); + // Send back authorization response with "baz" and "bat" headers. // Also add multiple values "append-foo" and "append-bar" for key "x-append-bat". // Also tell Envoy to remove "remove-me" header before sending to upstream. @@ -571,6 +583,7 @@ class ExtAuthzHttpIntegrationTest : public HttpIntegrationTest, allowed_headers: patterns: - exact: X-Case-Sensitive-Header + - exact: x-duplicate authorization_response: allowed_upstream_headers: @@ -588,34 +601,30 @@ class ExtAuthzHttpIntegrationTest : public HttpIntegrationTest, }; INSTANTIATE_TEST_SUITE_P(IpVersionsCientType, ExtAuthzGrpcIntegrationTest, - VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS, - Grpc::VersionedGrpcClientIntegrationParamTest::protocolTestParamsToString); + GRPC_CLIENT_INTEGRATION_PARAMS, + Grpc::GrpcClientIntegrationParamTest::protocolTestParamsToString); // Verifies that the request body is included in the CheckRequest when the downstream protocol is // HTTP/1.1. 
TEST_P(ExtAuthzGrpcIntegrationTest, HTTP1DownstreamRequestWithBody) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; expectCheckRequestWithBody(Http::CodecType::HTTP1, 4); } // Verifies that the request body is included in the CheckRequest when the downstream protocol is // HTTP/1.1 and the size of the request body is larger than max_request_bytes. TEST_P(ExtAuthzGrpcIntegrationTest, HTTP1DownstreamRequestWithLargeBody) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; expectCheckRequestWithBody(Http::CodecType::HTTP1, 2048); } // Verifies that the request body is included in the CheckRequest when the downstream protocol is // HTTP/2. TEST_P(ExtAuthzGrpcIntegrationTest, HTTP2DownstreamRequestWithBody) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; expectCheckRequestWithBody(Http::CodecType::HTTP2, 4); } // Verifies that the request body is included in the CheckRequest when the downstream protocol is // HTTP/2 and the size of the request body is larger than max_request_bytes. TEST_P(ExtAuthzGrpcIntegrationTest, HTTP2DownstreamRequestWithLargeBody) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; expectCheckRequestWithBody(Http::CodecType::HTTP2, 2048); } @@ -623,7 +632,6 @@ TEST_P(ExtAuthzGrpcIntegrationTest, HTTP2DownstreamRequestWithLargeBody) { // server returns headers_to_add, response_headers_to_add, and headers_to_append in OkResponse // message. 
TEST_P(ExtAuthzGrpcIntegrationTest, SendHeadersToAddAndToAppendToUpstream) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; expectCheckRequestWithBodyWithHeaders( Http::CodecType::HTTP1, 4, /*headers_to_add=*/Headers{{"header1", "header1"}}, @@ -636,27 +644,22 @@ TEST_P(ExtAuthzGrpcIntegrationTest, SendHeadersToAddAndToAppendToUpstream) { } TEST_P(ExtAuthzGrpcIntegrationTest, AllowAtDisable) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; expectFilterDisableCheck(/*deny_at_disable=*/false, /*disable_with_metadata=*/false, "200"); } TEST_P(ExtAuthzGrpcIntegrationTest, AllowAtDisableWithMetadata) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; expectFilterDisableCheck(/*deny_at_disable=*/false, /*disable_with_metadata=*/true, "200"); } TEST_P(ExtAuthzGrpcIntegrationTest, DenyAtDisable) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; expectFilterDisableCheck(/*deny_at_disable=*/true, /*disable_with_metadata=*/false, "403"); } TEST_P(ExtAuthzGrpcIntegrationTest, DenyAtDisableWithMetadata) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; expectFilterDisableCheck(/*deny_at_disable=*/true, /*disable_with_metadata=*/true, "403"); } TEST_P(ExtAuthzGrpcIntegrationTest, DownstreamHeadersOnSuccess) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; // Set up ext_authz filter. 
initializeConfig(); @@ -698,6 +701,47 @@ TEST_P(ExtAuthzHttpIntegrationTest, DefaultCaseSensitiveStringMatcher) { ASSERT_TRUE(header_entry.empty()); } +TEST_P(ExtAuthzHttpIntegrationTest, DirectReponse) { + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { + auto* virtual_hosts = hcm.mutable_route_config()->mutable_virtual_hosts(0); + virtual_hosts->mutable_routes(0)->clear_route(); + envoy::config::route::v3::Route* route = virtual_hosts->mutable_routes(0); + route->mutable_direct_response()->set_status(204); + }); + + initializeConfig(); + HttpIntegrationTest::initialize(); + initiateClientConnection(); + waitForExtAuthzRequest(); + + ASSERT_TRUE(response_->waitForEndStream()); + EXPECT_TRUE(response_->complete()); + EXPECT_EQ("204", response_->headers().Status()->value().getStringView()); +} + +TEST_P(ExtAuthzHttpIntegrationTest, RedirectResponse) { + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { + auto* virtual_hosts = hcm.mutable_route_config()->mutable_virtual_hosts(0); + virtual_hosts->mutable_routes(0)->clear_route(); + envoy::config::route::v3::Route* route = virtual_hosts->mutable_routes(0); + route->mutable_redirect()->set_path_redirect("/redirect"); + }); + + initializeConfig(); + HttpIntegrationTest::initialize(); + initiateClientConnection(); + waitForExtAuthzRequest(); + + ASSERT_TRUE(response_->waitForEndStream()); + EXPECT_TRUE(response_->complete()); + EXPECT_EQ("301", response_->headers().Status()->value().getStringView()); + EXPECT_EQ("http://host/redirect", response_->headers().getLocationValue()); +} + class ExtAuthzLocalReplyIntegrationTest : public HttpIntegrationTest, public TestWithParam { public: @@ -810,7 +854,6 @@ TEST_P(ExtAuthzLocalReplyIntegrationTest, DeniedHeaderTest) { } TEST_P(ExtAuthzGrpcIntegrationTest, GoogleAsyncClientCreation) { - 
XDS_DEPRECATED_FEATURE_TEST_SKIP; initializeConfig(); setDownstreamProtocol(Http::CodecType::HTTP2); HttpIntegrationTest::initialize(); @@ -846,8 +889,7 @@ TEST_P(ExtAuthzGrpcIntegrationTest, GoogleAsyncClientCreation) { RELEASE_ASSERT(result, result.message()); EXPECT_EQ("POST", ext_authz_request_->headers().getMethodValue()); - EXPECT_EQ(TestUtility::getVersionedMethodPath("envoy.service.auth.{}.Authorization", "Check", - apiVersion()), + EXPECT_EQ("/envoy.service.auth.v3.Authorization/Check", ext_authz_request_->headers().getPathValue()); EXPECT_EQ("application/grpc", ext_authz_request_->headers().getContentTypeValue()); result = ext_authz_request_->waitForEndStream(*dispatcher_); @@ -887,4 +929,13 @@ TEST_P(ExtAuthzGrpcIntegrationTest, GoogleAsyncClientCreation) { cleanup(); } +// Regression test for https://github.com/envoyproxy/envoy/issues/17344 +TEST(ExtConfigValidateTest, Validate) { + Server::TestComponentFactory component_factory; + EXPECT_TRUE(validateConfig(testing::NiceMock(TestEnvironment::runfilesPath( + "test/extensions/filters/http/ext_authz/ext_authz.yaml")), + Network::Address::InstanceConstSharedPtr(), component_factory, + Thread::threadFactoryForTest(), Filesystem::fileSystemForTest())); +} + } // namespace Envoy diff --git a/test/extensions/filters/http/ext_authz/ext_authz_test.cc b/test/extensions/filters/http/ext_authz/ext_authz_test.cc index 666870baf0b71..18283f359bc95 100644 --- a/test/extensions/filters/http/ext_authz/ext_authz_test.cc +++ b/test/extensions/filters/http/ext_authz/ext_authz_test.cc @@ -26,6 +26,7 @@ #include "test/mocks/tracing/mocks.h" #include "test/mocks/upstream/cluster_manager.h" #include "test/test_common/printers.h" +#include "test/test_common/test_runtime.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" @@ -65,8 +66,8 @@ template class HttpFilterTestBase : public T { void prepareCheck() { ON_CALL(filter_callbacks_, connection()).WillByDefault(Return(&connection_)); - 
connection_.stream_info_.downstream_address_provider_->setRemoteAddress(addr_); - connection_.stream_info_.downstream_address_provider_->setLocalAddress(addr_); + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress(addr_); + connection_.stream_info_.downstream_connection_info_provider_->setLocalAddress(addr_); } NiceMock stats_store_; @@ -234,8 +235,8 @@ TEST_F(HttpFilterTest, ErrorFailClose) { )EOF"); ON_CALL(filter_callbacks_, connection()).WillByDefault(Return(&connection_)); - connection_.stream_info_.downstream_address_provider_->setRemoteAddress(addr_); - connection_.stream_info_.downstream_address_provider_->setLocalAddress(addr_); + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress(addr_); + connection_.stream_info_.downstream_connection_info_provider_->setLocalAddress(addr_); EXPECT_CALL(*client_, check(_, _, _, _)) .WillOnce( Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks, @@ -273,8 +274,8 @@ TEST_F(HttpFilterTest, ErrorCustomStatusCode) { )EOF"); ON_CALL(filter_callbacks_, connection()).WillByDefault(Return(&connection_)); - connection_.stream_info_.downstream_address_provider_->setRemoteAddress(addr_); - connection_.stream_info_.downstream_address_provider_->setLocalAddress(addr_); + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress(addr_); + connection_.stream_info_.downstream_connection_info_provider_->setLocalAddress(addr_); EXPECT_CALL(*client_, check(_, _, _, _)) .WillOnce( Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks, @@ -313,8 +314,8 @@ TEST_F(HttpFilterTest, ErrorOpen) { )EOF"); ON_CALL(filter_callbacks_, connection()).WillByDefault(Return(&connection_)); - connection_.stream_info_.downstream_address_provider_->setRemoteAddress(addr_); - connection_.stream_info_.downstream_address_provider_->setLocalAddress(addr_); + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress(addr_); + 
connection_.stream_info_.downstream_connection_info_provider_->setLocalAddress(addr_); EXPECT_CALL(*client_, check(_, _, _, _)) .WillOnce( Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks, @@ -347,8 +348,8 @@ TEST_F(HttpFilterTest, ImmediateErrorOpen) { )EOF"); ON_CALL(filter_callbacks_, connection()).WillByDefault(Return(&connection_)); - connection_.stream_info_.downstream_address_provider_->setRemoteAddress(addr_); - connection_.stream_info_.downstream_address_provider_->setLocalAddress(addr_); + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress(addr_); + connection_.stream_info_.downstream_connection_info_provider_->setLocalAddress(addr_); Filters::Common::ExtAuthz::Response response{}; response.status = Filters::Common::ExtAuthz::CheckStatus::Error; @@ -437,8 +438,8 @@ TEST_F(HttpFilterTest, RequestDataWithPartialMessage) { ON_CALL(filter_callbacks_, connection()).WillByDefault(Return(&connection_)); ON_CALL(filter_callbacks_, decodingBuffer()).WillByDefault(Return(&data_)); EXPECT_CALL(filter_callbacks_, setDecoderBufferLimit(_)).Times(0); - connection_.stream_info_.downstream_address_provider_->setRemoteAddress(addr_); - connection_.stream_info_.downstream_address_provider_->setLocalAddress(addr_); + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress(addr_); + connection_.stream_info_.downstream_connection_info_provider_->setLocalAddress(addr_); EXPECT_CALL(*client_, check(_, _, _, _)); EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, @@ -479,8 +480,8 @@ TEST_F(HttpFilterTest, RequestDataWithPartialMessageThenContinueDecoding) { ON_CALL(filter_callbacks_, connection()).WillByDefault(Return(&connection_)); ON_CALL(filter_callbacks_, decodingBuffer()).WillByDefault(Return(&data_)); EXPECT_CALL(filter_callbacks_, setDecoderBufferLimit(_)).Times(0); - connection_.stream_info_.downstream_address_provider_->setRemoteAddress(addr_); - 
connection_.stream_info_.downstream_address_provider_->setLocalAddress(addr_); + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress(addr_); + connection_.stream_info_.downstream_connection_info_provider_->setLocalAddress(addr_); // The check call should only be called once. EXPECT_CALL(*client_, check(_, _, testing::A(), _)) @@ -535,8 +536,8 @@ TEST_F(HttpFilterTest, RequestDataWithSmallBuffer) { ON_CALL(filter_callbacks_, connection()).WillByDefault(Return(&connection_)); ON_CALL(filter_callbacks_, decodingBuffer()).WillByDefault(Return(&data_)); EXPECT_CALL(filter_callbacks_, setDecoderBufferLimit(_)).Times(0); - connection_.stream_info_.downstream_address_provider_->setRemoteAddress(addr_); - connection_.stream_info_.downstream_address_provider_->setLocalAddress(addr_); + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress(addr_); + connection_.stream_info_.downstream_connection_info_provider_->setLocalAddress(addr_); EXPECT_CALL(*client_, check(_, _, _, _)); EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, @@ -1487,9 +1488,6 @@ TEST_P(HttpFilterTestParam, ContextExtensions) { "default_route_value"; // Initialize the virtual host's per filter config. FilterConfigPerRoute auth_per_vhost(settingsvhost); - ON_CALL(filter_callbacks_.route_->route_entry_.virtual_host_, - perFilterConfig("envoy.filters.http.ext_authz")) - .WillByDefault(Return(&auth_per_vhost)); // Place something in the context extensions on the route. envoy::extensions::filters::http::ext_authz::v3::ExtAuthzPerRoute settingsroute; @@ -1497,8 +1495,16 @@ TEST_P(HttpFilterTestParam, ContextExtensions) { "value_route"; // Initialize the route's per filter config. 
FilterConfigPerRoute auth_per_route(settingsroute); - ON_CALL(filter_callbacks_.route_->route_entry_, perFilterConfig("envoy.filters.http.ext_authz")) - .WillByDefault(Return(&auth_per_route)); + + EXPECT_CALL(*filter_callbacks_.route_, + mostSpecificPerFilterConfig("envoy.filters.http.ext_authz")) + .WillOnce(Return(&auth_per_route)); + EXPECT_CALL(*filter_callbacks_.route_, traversePerFilterConfig("envoy.filters.http.ext_authz", _)) + .WillOnce(Invoke([&](const std::string&, + std::function cb) { + cb(auth_per_vhost); + cb(auth_per_route); + })); prepareCheck(); @@ -1527,7 +1533,7 @@ TEST_P(HttpFilterTestParam, DisabledOnRoute) { prepareCheck(); - ON_CALL(filter_callbacks_.route_->route_entry_, perFilterConfig("envoy.filters.http.ext_authz")) + ON_CALL(*filter_callbacks_.route_, mostSpecificPerFilterConfig("envoy.filters.http.ext_authz")) .WillByDefault(Return(&auth_per_route)); auto test_disable = [&](bool disabled) { @@ -1558,7 +1564,7 @@ TEST_P(HttpFilterTestParam, DisabledOnRouteWithRequestBody) { envoy::extensions::filters::http::ext_authz::v3::ExtAuthzPerRoute settings; FilterConfigPerRoute auth_per_route(settings); - ON_CALL(filter_callbacks_.route_->route_entry_, perFilterConfig("envoy.filters.http.ext_authz")) + ON_CALL(*filter_callbacks_.route_, mostSpecificPerFilterConfig("envoy.filters.http.ext_authz")) .WillByDefault(Return(&auth_per_route)); auto test_disable = [&](bool disabled) { @@ -1598,14 +1604,30 @@ TEST_P(HttpFilterTestParam, DisabledOnRouteWithRequestBody) { EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); } -// Test that the request continues when the filter_callbacks has no route. 
+// Test that authentication will do when the filter_callbacks has no route.(both +// direct response and redirect have no route) TEST_P(HttpFilterTestParam, NoRoute) { - EXPECT_CALL(*filter_callbacks_.route_, routeEntry()).WillOnce(Return(nullptr)); - EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); + EXPECT_CALL(*filter_callbacks_.route_, routeEntry()).WillRepeatedly(Return(nullptr)); + prepareCheck(); + EXPECT_CALL(*client_, check(_, _, _, _)); + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, + filter_->decodeHeaders(request_headers_, false)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); } +// Test that the authentication will be skipped when the filter_callbacks has no route(both +// direct response and redirect have no route) when the runtime flag +// `envoy.reloadable_features.http_ext_authz_do_not_skip_direct_response_and_redirect` is false. +TEST_P(HttpFilterTestParam, NoRouteWithSkipAuth) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.http_ext_authz_do_not_skip_direct_response_and_redirect", + "false"}}); + EXPECT_CALL(*filter_callbacks_.route_, routeEntry()).WillOnce(Return(nullptr)); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); +} + // Test that the request is stopped till there is an OK response back after which it continues on. TEST_P(HttpFilterTestParam, OkResponse) { InSequence s; @@ -2136,7 +2158,7 @@ TEST_P(HttpFilterTestParam, NoCluster) { "value_route"; // Initialize the route's per filter config. 
FilterConfigPerRoute auth_per_route(settingsroute); - ON_CALL(filter_callbacks_.route_->route_entry_, perFilterConfig("envoy.filters.http.ext_authz")) + ON_CALL(*filter_callbacks_.route_, mostSpecificPerFilterConfig("envoy.filters.http.ext_authz")) .WillByDefault(Return(&auth_per_route)); prepareCheck(); @@ -2162,7 +2184,7 @@ TEST_P(HttpFilterTestParam, DisableRequestBodyBufferingOnRoute) { envoy::extensions::filters::http::ext_authz::v3::ExtAuthzPerRoute settings; FilterConfigPerRoute auth_per_route(settings); - ON_CALL(filter_callbacks_.route_->route_entry_, perFilterConfig("envoy.filters.http.ext_authz")) + ON_CALL(*filter_callbacks_.route_, mostSpecificPerFilterConfig("envoy.filters.http.ext_authz")) .WillByDefault(Return(&auth_per_route)); auto test_disable_request_body_buffering = [&](bool bypass) { @@ -2195,8 +2217,8 @@ TEST_P(HttpFilterTestParam, DisableRequestBodyBufferingOnRoute) { test_disable_request_body_buffering(true); // When request body buffering is skipped, setDecoderBufferLimit is not called. 
EXPECT_CALL(filter_callbacks_, setDecoderBufferLimit(_)).Times(0); - connection_.stream_info_.downstream_address_provider_->setRemoteAddress(addr_); - connection_.stream_info_.downstream_address_provider_->setLocalAddress(addr_); + connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress(addr_); + connection_.stream_info_.downstream_connection_info_provider_->setLocalAddress(addr_); EXPECT_CALL(*client_, check(_, _, _, _)); EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, filter_->decodeHeaders(request_headers_, false)); diff --git a/test/extensions/filters/http/ext_proc/BUILD b/test/extensions/filters/http/ext_proc/BUILD index 779252a19a27c..94a5abbb88ff2 100644 --- a/test/extensions/filters/http/ext_proc/BUILD +++ b/test/extensions/filters/http/ext_proc/BUILD @@ -1,5 +1,6 @@ load( "//bazel:envoy_build_system.bzl", + "envoy_cc_fuzz_test", "envoy_package", ) load( @@ -41,6 +42,16 @@ envoy_extension_cc_test( ], ) +envoy_extension_cc_test( + name = "state_test", + size = "small", + srcs = ["state_test.cc"], + extension_names = ["envoy.filters.http.ext_proc"], + deps = [ + "//source/extensions/filters/http/ext_proc", + ], +) + envoy_extension_cc_test( name = "ordering_test", size = "small", @@ -124,7 +135,10 @@ envoy_extension_cc_test_library( hdrs = ["test_processor.h"], extension_names = ["envoy.filters.http.ext_proc"], deps = [ + "//envoy/network:address_interface", + "//test/test_common:network_utility_lib", "@com_github_grpc_grpc//:grpc++", + "@com_google_absl//absl/strings:str_format", "@envoy_api//envoy/service/ext_proc/v3alpha:pkg_cc_grpc", "@envoy_api//envoy/service/ext_proc/v3alpha:pkg_cc_proto", ], @@ -152,3 +166,44 @@ envoy_extension_cc_test_library( "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) + +envoy_extension_cc_test_library( + name = "ext_proc_grpc_fuzz_lib", + srcs = ["ext_proc_grpc_fuzz_helper.cc"], + hdrs = ["ext_proc_grpc_fuzz_helper.h"], + extension_names = ["envoy.filters.http.ext_proc"], + deps 
= [ + "//source/common/common:thread_lib", + "//source/common/grpc:common_lib", + "//test/common/http:common_lib", + "//test/fuzz:fuzz_runner_lib", + "//test/fuzz:utility_lib", + "//test/integration:http_integration_lib", + "//test/test_common:utility_lib", + "@com_github_grpc_grpc//:grpc++", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/ext_proc/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/service/ext_proc/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/type/v3:pkg_cc_proto", + ], +) + +envoy_cc_fuzz_test( + name = "ext_proc_grpc_fuzz_test", + srcs = ["ext_proc_grpc_fuzz.cc"], + corpus = "ext_proc_grpc_corpus", + deps = [ + ":ext_proc_grpc_fuzz_lib", + ":test_processor_lib", + "//source/common/network:address_lib", + "//source/extensions/filters/http/ext_proc:config", + "//test/common/http:common_lib", + "//test/fuzz:utility_lib", + "//test/integration:http_integration_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/ext_proc/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/service/ext_proc/v3alpha:pkg_cc_proto", + "@envoy_api//envoy/type/v3:pkg_cc_proto", + ], +) diff --git a/test/extensions/filters/http/ext_proc/ext_proc_grpc_corpus/test b/test/extensions/filters/http/ext_proc/ext_proc_grpc_corpus/test new file mode 100644 index 0000000000000..aa15709f3ba56 --- /dev/null +++ b/test/extensions/filters/http/ext_proc/ext_proc_grpc_corpus/test @@ -0,0 +1 @@ +FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF diff --git a/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz.cc b/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz.cc new file mode 100644 index 0000000000000..85f01a408c12f --- /dev/null +++ b/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz.cc @@ -0,0 +1,315 @@ +// TODO(ikepolinsky): Major action items to improve this fuzzer +// 1. 
Move external process from separate thread to have test all in one thread +// - Explore using fake gRPC client for this +// 2. Implement sending trailers from downstream and mutating headers/trailers +// in the external process. +// 3. Use an upstream that sends varying responses (also with trailers) +// 4. Explore performance optimizations: +// - Threads and fake gRPC client above might help +// - Local testing had almost 800k inline 8 bit counters resulting in ~3 +// exec/s. How far can we reduce the number of counters? +// - At the loss of reproducibility use a persistent envoy +// 5. Protobuf fuzzing would greatly increase crash test case readability +// - How will this impact speed? +// - Can it be done on single thread as well? +// 6. Restructure to inherit common functions between ExtProcIntegrationTest +// and this class. This involves adding a new ExtProcIntegrationBase class +// common to both. +// 7. Remove locks after crash is addressed by separate issue + +#include "envoy/config/core/v3/base.pb.h" +#include "envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.pb.h" +#include "envoy/service/ext_proc/v3alpha/external_processor.pb.h" +#include "envoy/type/v3/http_status.pb.h" + +#include "source/common/network/address_impl.h" + +#include "test/common/http/common.h" +#include "test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz_helper.h" +#include "test/extensions/filters/http/ext_proc/test_processor.h" +#include "test/fuzz/fuzz_runner.h" +#include "test/integration/http_integration.h" +#include "test/test_common/utility.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace ExternalProcessing { + +using envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode; +using envoy::service::ext_proc::v3alpha::ProcessingRequest; +using envoy::service::ext_proc::v3alpha::ProcessingResponse; + +// The buffer size for the listeners +static const uint32_t BufferSize = 100000; + +// These tests exercise the ext_proc 
filter through Envoy's integration test +// environment by configuring an instance of the Envoy server and driving it +// through the mock network stack. + +class ExtProcIntegrationFuzz : public HttpIntegrationTest, + public Grpc::BaseGrpcClientIntegrationParamTest { +public: + ExtProcIntegrationFuzz(Network::Address::IpVersion ip_version, Grpc::ClientType client_type) + : HttpIntegrationTest(Http::CodecType::HTTP2, ip_version) { + ip_version_ = ip_version; + client_type_ = client_type; + } + + void tearDown() { + cleanupUpstreamAndDownstream(); + test_processor_.shutdown(); + } + + Network::Address::IpVersion ipVersion() const override { return ip_version_; } + Grpc::ClientType clientType() const override { return client_type_; } + + void initializeFuzzer(bool autonomous_upstream) { + autonomous_upstream_ = autonomous_upstream; + autonomous_allow_incomplete_streams_ = true; + initializeConfig(); + HttpIntegrationTest::initialize(); + } + + void initializeConfig() { + config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + // Create a cluster for our gRPC server pointing to the address that is running the gRPC + // server. + auto* processor_cluster = bootstrap.mutable_static_resources()->add_clusters(); + processor_cluster->set_name("ext_proc_server"); + processor_cluster->mutable_load_assignment()->set_cluster_name("ext_proc_server"); + auto* address = processor_cluster->mutable_load_assignment() + ->add_endpoints() + ->add_lb_endpoints() + ->mutable_endpoint() + ->mutable_address() + ->mutable_socket_address(); + address->set_address(Network::Test::getLoopbackAddressString(ipVersion())); + address->set_port_value(test_processor_.port()); + + // Ensure "HTTP2 with no prior knowledge." Necessary for gRPC. + ConfigHelper::setHttp2( + *(bootstrap.mutable_static_resources()->mutable_clusters()->Mutable(0))); + ConfigHelper::setHttp2(*processor_cluster); + + // Make sure both flavors of gRPC client use the right address. 
+ const auto addr = Network::Test::getCanonicalLoopbackAddress(ipVersion()); + const auto addr_port = Network::Utility::getAddressWithPort(*addr, test_processor_.port()); + setGrpcService(*proto_config_.mutable_grpc_service(), "ext_proc_server", addr_port); + + // Merge the filter. + envoy::config::listener::v3::Filter ext_proc_filter; + ext_proc_filter.set_name("envoy.filters.http.ext_proc"); + ext_proc_filter.mutable_typed_config()->PackFrom(proto_config_); + config_helper_.addFilter(MessageUtil::getJsonStringFromMessageOrDie(ext_proc_filter)); + }); + + // Make sure that we have control over when buffers will fill up. + config_helper_.setBufferLimits(BufferSize, BufferSize); + + setUpstreamProtocol(Http::CodecType::HTTP2); + setDownstreamProtocol(Http::CodecType::HTTP2); + } + + IntegrationStreamDecoderPtr sendDownstreamRequest( + absl::optional> modify_headers, + absl::string_view http_method = "GET") { + auto conn = makeClientConnection(lookupPort("http")); + codec_client_ = makeHttpConnection(std::move(conn)); + Http::TestRequestHeaderMapImpl headers{{":method", std::string(http_method)}}; + if (modify_headers) { + (*modify_headers)(headers); + } + HttpTestUtility::addDefaultHeaders(headers, false); + return codec_client_->makeHeaderOnlyRequest(headers); + } + + IntegrationStreamDecoderPtr sendDownstreamRequestWithBody( + absl::string_view body, + absl::optional> modify_headers, + absl::string_view http_method = "POST") { + auto conn = makeClientConnection(lookupPort("http")); + codec_client_ = makeHttpConnection(std::move(conn)); + Http::TestRequestHeaderMapImpl headers{{":method", std::string(http_method)}}; + HttpTestUtility::addDefaultHeaders(headers, false); + if (modify_headers) { + (*modify_headers)(headers); + } + return codec_client_->makeRequestWithBody(headers, std::string(body)); + } + + IntegrationStreamDecoderPtr sendDownstreamRequestWithChunks( + FuzzedDataProvider* fdp, ExtProcFuzzHelper* fh, + absl::optional> modify_headers, + 
absl::string_view http_method = "POST") { + auto conn = makeClientConnection(lookupPort("http")); + codec_client_ = makeHttpConnection(std::move(conn)); + Http::TestRequestHeaderMapImpl headers{{":method", std::string(http_method)}}; + HttpTestUtility::addDefaultHeaders(headers, false); + if (modify_headers) { + (*modify_headers)(headers); + } + auto encoder_decoder = codec_client_->startRequest(headers); + IntegrationStreamDecoderPtr response = std::move(encoder_decoder.second); + auto& encoder = encoder_decoder.first; + + const uint32_t num_chunks = + fdp->ConsumeIntegralInRange(0, ExtProcFuzzMaxStreamChunks); + for (uint32_t i = 0; i < num_chunks; i++) { + // TODO(ikepolinsky): open issue for this crash and remove locks once + // fixed. + // If proxy closes connection before body is fully sent it causes a + // crash. To address this, the external processor sets a flag to + // signal when it has generated an immediate response which will close + // the connection in the future. We check this flag, which is protected + // by a lock, before sending a chunk. If the flag is set, we don't attempt + // to send more data, regardless of whether or not the + // codec_client connection is still open. 
There are no locks protecting + // the codec_client connection and cannot trust that it's safe to send + // another chunk + fh->immediate_resp_lock_.lock(); + if (fh->immediate_resp_sent_) { + ENVOY_LOG_MISC(trace, "Proxy closed connection, returning early"); + fh->immediate_resp_lock_.unlock(); + return response; + } + const uint32_t data_size = fdp->ConsumeIntegralInRange(0, ExtProcFuzzMaxDataSize); + ENVOY_LOG_MISC(trace, "Sending chunk of {} bytes", data_size); + codec_client_->sendData(encoder, data_size, false); + fh->immediate_resp_lock_.unlock(); + } + + // See comment above + fh->immediate_resp_lock_.lock(); + if (!fh->immediate_resp_sent_) { + ENVOY_LOG_MISC(trace, "Sending empty chunk to close stream"); + Buffer::OwnedImpl empty_chunk; + codec_client_->sendData(encoder, empty_chunk, true); + } + fh->immediate_resp_lock_.unlock(); + return response; + } + + IntegrationStreamDecoderPtr randomDownstreamRequest(FuzzedDataProvider* fdp, + ExtProcFuzzHelper* fh) { + // From the external processor's view each of these requests + // are handled the same way. They only differ in what the server should + // send back to the client. 
+ // TODO(ikepolinsky): add random flag for sending trailers with a request + // using HttpIntegration::sendTrailers() + switch (fdp->ConsumeEnum()) { + case HttpMethod::GET: + ENVOY_LOG_MISC(trace, "Sending GET request"); + return sendDownstreamRequest(absl::nullopt); + case HttpMethod::POST: + if (fdp->ConsumeBool()) { + ENVOY_LOG_MISC(trace, "Sending POST request with body"); + const uint32_t data_size = fdp->ConsumeIntegralInRange(0, ExtProcFuzzMaxDataSize); + const std::string data = std::string(data_size, 'a'); + return sendDownstreamRequestWithBody(data, absl::nullopt); + } else { + ENVOY_LOG_MISC(trace, "Sending POST request with chunked body"); + return sendDownstreamRequestWithChunks(fdp, fh, absl::nullopt); + } + default: + RELEASE_ASSERT(false, "Unhandled HttpMethod"); + } + } + + envoy::extensions::filters::http::ext_proc::v3alpha::ExternalProcessor proto_config_{}; + TestProcessor test_processor_; + Network::Address::IpVersion ip_version_; + Grpc::ClientType client_type_; +}; + +DEFINE_FUZZER(const uint8_t* buf, size_t len) { + // Split the buffer into two buffers with at least 1 byte + if (len < 2) { + return; + } + + // External Process and downstream are on different threads so they should + // have separate data providers + const size_t downstream_buf_len = len / 2; + const size_t ext_proc_buf_len = len - downstream_buf_len; + + // downstream buf starts at 0, ext_prob buf starts at buf[downstream_buf_len] + FuzzedDataProvider downstream_provider(buf, downstream_buf_len); + FuzzedDataProvider ext_proc_provider(&buf[downstream_buf_len], ext_proc_buf_len); + + // Get IP and gRPC version from environment + ExtProcIntegrationFuzz fuzzer(TestEnvironment::getIpVersionsForTest()[0], + TestEnvironment::getsGrpcVersionsForTest()[0]); + ExtProcFuzzHelper fuzz_helper(&ext_proc_provider); + + // This starts an external processor in a separate thread. 
This allows for the + // external process to consume messages in a loop without blocking the fuzz + // target from receiving the response. + fuzzer.test_processor_.start( + fuzzer.ip_version_, + [&fuzz_helper](grpc::ServerReaderWriter* stream) { + while (true) { + ProcessingRequest req; + if (!stream->Read(&req)) { + return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "expected message"); + } + + fuzz_helper.logRequest(&req); + + // The following blocks generate random data for the 9 fields of the + // ProcessingResponse gRPC message + + // 1 - 7. Randomize response + // If true, immediately close the connection with a random Grpc Status. + // Otherwise randomize the response + ProcessingResponse resp; + if (fuzz_helper.provider_->ConsumeBool()) { + ENVOY_LOG_MISC(trace, "Immediately Closing gRPC connection"); + return fuzz_helper.randomGrpcStatusWithMessage(); + } else { + ENVOY_LOG_MISC(trace, "Generating Random ProcessingResponse"); + fuzz_helper.randomizeResponse(&resp, &req); + } + + // 8. Randomize dynamic_metadata + // TODO(ikepolinsky): ext_proc does not support dynamic_metadata + + // 9. 
Randomize mode_override + if (fuzz_helper.provider_->ConsumeBool()) { + ENVOY_LOG_MISC(trace, "Generating Random ProcessingMode Override"); + ProcessingMode* msg = resp.mutable_mode_override(); + fuzz_helper.randomizeOverrideResponse(msg); + } + + ENVOY_LOG_MISC(trace, "Response generated, writing to stream."); + stream->Write(resp); + } + + return grpc::Status::OK; + }); + + ENVOY_LOG_MISC(trace, "External Process started."); + + fuzzer.initializeFuzzer(true); + ENVOY_LOG_MISC(trace, "Fuzzer initialized"); + + const auto response = fuzzer.randomDownstreamRequest(&downstream_provider, &fuzz_helper); + + // For fuzz testing we don't care about the response code, only that + // the stream ended in some graceful manner + ENVOY_LOG_MISC(trace, "Waiting for response."); + if (response->waitForEndStream(std::chrono::milliseconds(200))) { + ENVOY_LOG_MISC(trace, "Response received."); + } else { + // TODO(ikepolinsky): investigate if there is anyway around this. + // Waiting too long for a fuzz case to fail will drastically + // reduce executions/second. 
+ ENVOY_LOG_MISC(trace, "Response timed out."); + } + fuzzer.tearDown(); +} + +} // namespace ExternalProcessing +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz_helper.cc b/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz_helper.cc new file mode 100644 index 0000000000000..b5d5e112dffe4 --- /dev/null +++ b/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz_helper.cc @@ -0,0 +1,459 @@ +#include "test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz_helper.h" + +#include "envoy/config/core/v3/base.pb.h" +#include "envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.pb.h" +#include "envoy/service/ext_proc/v3alpha/external_processor.pb.h" +#include "envoy/type/v3/http_status.pb.h" + +#include "source/common/common/thread.h" + +#include "test/common/http/common.h" +#include "test/fuzz/fuzz_runner.h" +#include "test/test_common/utility.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace ExternalProcessing { + +using envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode; +using envoy::service::ext_proc::v3alpha::CommonResponse; +using envoy::service::ext_proc::v3alpha::HeaderMutation; +using envoy::service::ext_proc::v3alpha::ImmediateResponse; +using envoy::service::ext_proc::v3alpha::ProcessingRequest; +using envoy::service::ext_proc::v3alpha::ProcessingResponse; +using envoy::type::v3::StatusCode; + +const StatusCode HttpStatusCodes[] = { + StatusCode::Continue, + StatusCode::OK, + StatusCode::Created, + StatusCode::Accepted, + StatusCode::NonAuthoritativeInformation, + StatusCode::NoContent, + StatusCode::ResetContent, + StatusCode::PartialContent, + StatusCode::MultiStatus, + StatusCode::AlreadyReported, + StatusCode::IMUsed, + StatusCode::MultipleChoices, + StatusCode::MovedPermanently, + StatusCode::Found, + StatusCode::SeeOther, + StatusCode::NotModified, + StatusCode::UseProxy, + 
StatusCode::TemporaryRedirect, + StatusCode::PermanentRedirect, + StatusCode::BadRequest, + StatusCode::Unauthorized, + StatusCode::PaymentRequired, + StatusCode::Forbidden, + StatusCode::NotFound, + StatusCode::MethodNotAllowed, + StatusCode::NotAcceptable, + StatusCode::ProxyAuthenticationRequired, + StatusCode::RequestTimeout, + StatusCode::Conflict, + StatusCode::Gone, + StatusCode::LengthRequired, + StatusCode::PreconditionFailed, + StatusCode::PayloadTooLarge, + StatusCode::URITooLong, + StatusCode::UnsupportedMediaType, + StatusCode::RangeNotSatisfiable, + StatusCode::ExpectationFailed, + StatusCode::MisdirectedRequest, + StatusCode::UnprocessableEntity, + StatusCode::Locked, + StatusCode::FailedDependency, + StatusCode::UpgradeRequired, + StatusCode::PreconditionRequired, + StatusCode::TooManyRequests, + StatusCode::RequestHeaderFieldsTooLarge, + StatusCode::InternalServerError, + StatusCode::NotImplemented, + StatusCode::BadGateway, + StatusCode::ServiceUnavailable, + StatusCode::GatewayTimeout, + StatusCode::HTTPVersionNotSupported, + StatusCode::VariantAlsoNegotiates, + StatusCode::InsufficientStorage, + StatusCode::LoopDetected, + StatusCode::NotExtended, + StatusCode::NetworkAuthenticationRequired, +}; + +const grpc::StatusCode GrpcStatusCodes[] = { + grpc::StatusCode::OK, + grpc::StatusCode::CANCELLED, + grpc::StatusCode::UNKNOWN, + grpc::StatusCode::INVALID_ARGUMENT, + grpc::StatusCode::DEADLINE_EXCEEDED, + grpc::StatusCode::NOT_FOUND, + grpc::StatusCode::ALREADY_EXISTS, + grpc::StatusCode::PERMISSION_DENIED, + grpc::StatusCode::RESOURCE_EXHAUSTED, + grpc::StatusCode::FAILED_PRECONDITION, + grpc::StatusCode::ABORTED, + grpc::StatusCode::OUT_OF_RANGE, + grpc::StatusCode::UNIMPLEMENTED, + grpc::StatusCode::INTERNAL, + grpc::StatusCode::UNAVAILABLE, + grpc::StatusCode::DATA_LOSS, + grpc::StatusCode::UNAUTHENTICATED, +}; + +ExtProcFuzzHelper::ExtProcFuzzHelper(FuzzedDataProvider* provider) { + provider_ = provider; + immediate_resp_sent_ = false; +} + 
+std::string ExtProcFuzzHelper::consumeRepeatedString() { + const uint32_t str_len = provider_->ConsumeIntegralInRange(0, ExtProcFuzzMaxDataSize); + return std::string(str_len, 'b'); +} + +// Since FuzzedDataProvider requires enums to define a kMaxValue, we cannot +// use the envoy::type::v3::StatusCode enum directly. +StatusCode ExtProcFuzzHelper::randomHttpStatus() { + const StatusCode rv = provider_->PickValueInArray(HttpStatusCodes); + ENVOY_LOG_MISC(trace, "Selected HTTP StatusCode {}", rv); + return rv; +} + +// Since FuzzedDataProvider requires enums to define a kMaxValue, we cannot +// use the grpc::StatusCode enum directly. +grpc::StatusCode ExtProcFuzzHelper::randomGrpcStatusCode() { + const grpc::StatusCode rv = provider_->PickValueInArray(GrpcStatusCodes); + ENVOY_LOG_MISC(trace, "Selected gRPC StatusCode {}", rv); + return rv; +} + +void ExtProcFuzzHelper::logRequest(const ProcessingRequest* req) { + if (req->has_request_headers()) { + ENVOY_LOG_MISC(trace, "Received ProcessingRequest request_headers"); + } else if (req->has_response_headers()) { + ENVOY_LOG_MISC(trace, "Received ProcessingRequest response_headers"); + } else if (req->has_request_body()) { + ENVOY_LOG_MISC(trace, "Received ProcessingRequest request_body"); + } else if (req->has_response_body()) { + ENVOY_LOG_MISC(trace, "Received ProcessingRequest response_body"); + } else if (req->has_request_trailers()) { + ENVOY_LOG_MISC(trace, "Received ProcessingRequest request_trailers"); + } else if (req->has_response_trailers()) { + ENVOY_LOG_MISC(trace, "Received ProcessingRequest response_trailers"); + } else { + ENVOY_LOG_MISC(trace, "Received unexpected ProcessingRequest"); + } +} + +grpc::Status ExtProcFuzzHelper::randomGrpcStatusWithMessage() { + const grpc::StatusCode code = randomGrpcStatusCode(); + ENVOY_LOG_MISC(trace, "Closing stream with StatusCode {}", code); + return grpc::Status(code, consumeRepeatedString()); +} + +// TODO(ikepolinsky): implement this function +// Randomizes a 
HeaderMutation taken as input. Header/Trailer values of the +// request are available in req which allows for more guided manipulation of the +// headers. The bool value should be false to manipulate headers and +// true to manipulate trailers (which are also a header map) +void ExtProcFuzzHelper::randomizeHeaderMutation(HeaderMutation*, const ProcessingRequest*, + const bool) { + // Each of the following blocks generates random data for the 2 fields + // of a HeaderMutation gRPC message + + // 1. Randomize set_headers + // TODO(ikepolinsky): randomly add headers + + // 2. Randomize remove headers + // TODO(ikepolinsky): Randomly remove headers +} + +void ExtProcFuzzHelper::randomizeCommonResponse(CommonResponse* msg, const ProcessingRequest* req) { + // Each of the following blocks generates random data for the 5 fields + // of CommonResponse gRPC message + // 1. Randomize status + if (provider_->ConsumeBool()) { + switch (provider_->ConsumeEnum()) { + case CommonResponseStatus::Continue: + ENVOY_LOG_MISC(trace, "CommonResponse status CONTINUE"); + msg->set_status(CommonResponse::CONTINUE); + break; + case CommonResponseStatus::ContinueAndReplace: + ENVOY_LOG_MISC(trace, "CommonResponse status CONTINUE_AND_REPLACE"); + msg->set_status(CommonResponse::CONTINUE_AND_REPLACE); + break; + default: + RELEASE_ASSERT(false, "Unhandled case in random CommonResponse Status"); + } + } + + // 2. Randomize header_mutation + if (provider_->ConsumeBool()) { + ENVOY_LOG_MISC(trace, "CommonResponse setting header_mutation"); + randomizeHeaderMutation(msg->mutable_header_mutation(), req, false); + } + + // 3. 
Randomize body_mutation + if (provider_->ConsumeBool()) { + auto* body_mutation = msg->mutable_body_mutation(); + if (provider_->ConsumeBool()) { + ENVOY_LOG_MISC(trace, "CommonResponse setting body_mutation, replacing body with bytes"); + body_mutation->set_body(consumeRepeatedString()); + } else { + ENVOY_LOG_MISC(trace, "CommonResponse setting body_mutation, clearing body"); + body_mutation->set_clear_body(provider_->ConsumeBool()); + } + } + + // 4. Randomize trailers + // TODO(ikepolinsky) ext_proc currently does not support this field + + // 5. Randomize clear_route_cache + if (provider_->ConsumeBool()) { + ENVOY_LOG_MISC(trace, "CommonResponse clearing route cache"); + msg->set_clear_route_cache(true); + } +} + +void ExtProcFuzzHelper::randomizeImmediateResponse(ImmediateResponse* msg, + const ProcessingRequest* req) { + // Each of the following blocks generates random data for the 5 fields + // of an ImmediateResponse gRPC message + // 1. Randomize HTTP status (required) + ENVOY_LOG_MISC(trace, "ImmediateResponse setting status"); + msg->mutable_status()->set_code(randomHttpStatus()); + + // 2. Randomize headers + if (provider_->ConsumeBool()) { + ENVOY_LOG_MISC(trace, "ImmediateResponse setting headers"); + randomizeHeaderMutation(msg->mutable_headers(), req, false); + } + + // 3. Randomize body + if (provider_->ConsumeBool()) { + ENVOY_LOG_MISC(trace, "ImmediateResponse setting body"); + msg->set_body(consumeRepeatedString()); + } + + // 4. Randomize grpc_status + if (provider_->ConsumeBool()) { + ENVOY_LOG_MISC(trace, "ImmediateResponse setting grpc_status"); + msg->mutable_grpc_status()->set_status(randomGrpcStatusCode()); + } + + // 5. 
Randomize details + if (provider_->ConsumeBool()) { + ENVOY_LOG_MISC(trace, "ImmediateResponse setting details"); + msg->set_details(consumeRepeatedString()); + } +} + +void ExtProcFuzzHelper::randomizeOverrideResponse(ProcessingMode* msg) { + // Each of the following blocks generates random data for the 6 fields + // of a ProcessingMode gRPC message + // 1. Randomize request_header_mode + if (provider_->ConsumeBool()) { + switch (provider_->ConsumeEnum()) { + case HeaderSendSetting::Default: + ENVOY_LOG_MISC(trace, "Override ProcessingMode: setting request_header_mode DEFAULT"); + msg->set_request_header_mode(ProcessingMode::DEFAULT); + break; + case HeaderSendSetting::Send: + ENVOY_LOG_MISC(trace, "Override ProcessingMode: setting request_header_mode SEND"); + msg->set_request_header_mode(ProcessingMode::SEND); + break; + case HeaderSendSetting::Skip: + ENVOY_LOG_MISC(trace, "Override ProcessingMode: setting request_header_mode SKIP"); + msg->set_request_header_mode(ProcessingMode::SKIP); + break; + default: + RELEASE_ASSERT(false, "HeaderSendSetting not handled"); + } + } + + // 2. Randomize response_header_mode + if (provider_->ConsumeBool()) { + switch (provider_->ConsumeEnum()) { + case HeaderSendSetting::Default: + ENVOY_LOG_MISC(trace, "Override ProcessingMode: setting response_header_mode DEFAULT"); + msg->set_response_header_mode(ProcessingMode::DEFAULT); + break; + case HeaderSendSetting::Send: + ENVOY_LOG_MISC(trace, "Override ProcessingMode: setting response_header_mode SEND"); + msg->set_response_header_mode(ProcessingMode::SEND); + break; + case HeaderSendSetting::Skip: + ENVOY_LOG_MISC(trace, "Override ProcessingMode: setting response_header_mode SKIP"); + msg->set_response_header_mode(ProcessingMode::SKIP); + break; + default: + RELEASE_ASSERT(false, "HeaderSendSetting not handled"); + } + } + + // 3. 
Randomize request_body_mode + if (provider_->ConsumeBool()) { + switch (provider_->ConsumeEnum()) { + case BodySendSetting::None: + ENVOY_LOG_MISC(trace, "Override ProcessingMode: setting request_body_mode NONE"); + msg->set_request_body_mode(ProcessingMode::NONE); + break; + case BodySendSetting::Streamed: + ENVOY_LOG_MISC(trace, "Override ProcessingMode: setting request_body_mode STREAMED"); + msg->set_request_body_mode(ProcessingMode::STREAMED); + break; + case BodySendSetting::Buffered: + ENVOY_LOG_MISC(trace, "Override ProcessingMode: setting request_body_mode BUFFERED"); + msg->set_request_body_mode(ProcessingMode::BUFFERED); + break; + case BodySendSetting::BufferedPartial: + ENVOY_LOG_MISC(trace, "Override ProcessingMode: setting request_body_mode " + "BUFFERED_PARTIAL"); + msg->set_request_body_mode(ProcessingMode::BUFFERED_PARTIAL); + break; + default: + RELEASE_ASSERT(false, "BodySendSetting not handled"); + } + } + + // 4. Randomize response_body_mode + if (provider_->ConsumeBool()) { + switch (provider_->ConsumeEnum()) { + case BodySendSetting::None: + ENVOY_LOG_MISC(trace, "Override ProcessingMode: setting response_body_mode NONE"); + msg->set_response_body_mode(ProcessingMode::NONE); + break; + case BodySendSetting::Streamed: + ENVOY_LOG_MISC(trace, "Override ProcessingMode: setting response_body_mode STREAMED"); + msg->set_response_body_mode(ProcessingMode::STREAMED); + break; + case BodySendSetting::Buffered: + ENVOY_LOG_MISC(trace, "Override ProcessingMode: setting response_body_mode BUFFERED"); + msg->set_response_body_mode(ProcessingMode::BUFFERED); + break; + case BodySendSetting::BufferedPartial: + ENVOY_LOG_MISC(trace, "Override ProcessingMode: setting response_body_mode " + "BUFFERED_PARTIAL"); + msg->set_response_body_mode(ProcessingMode::BUFFERED_PARTIAL); + break; + default: + RELEASE_ASSERT(false, "BodySendSetting not handled"); + } + } + + // 5. 
Randomize request_trailer_mode + if (provider_->ConsumeBool()) { + switch (provider_->ConsumeEnum()) { + case HeaderSendSetting::Default: + ENVOY_LOG_MISC(trace, "Override ProcessingMode: setting request_trailer_mode DEFAULT"); + msg->set_request_trailer_mode(ProcessingMode::DEFAULT); + break; + case HeaderSendSetting::Send: + ENVOY_LOG_MISC(trace, "Override ProcessingMode: setting request_trailer_mode SEND"); + msg->set_request_trailer_mode(ProcessingMode::SEND); + break; + case HeaderSendSetting::Skip: + ENVOY_LOG_MISC(trace, "Override ProcessingMode: setting request_trailer_mode SKIP"); + msg->set_request_trailer_mode(ProcessingMode::SKIP); + break; + default: + RELEASE_ASSERT(false, "HeaderSendSetting not handled"); + } + } + + // 6. Randomize response_trailer_mode + if (provider_->ConsumeBool()) { + switch (provider_->ConsumeEnum()) { + case HeaderSendSetting::Default: + ENVOY_LOG_MISC(trace, "Override ProcessingMode: setting response_trailer_mode DEFAULT"); + msg->set_response_trailer_mode(ProcessingMode::DEFAULT); + break; + case HeaderSendSetting::Send: + ENVOY_LOG_MISC(trace, "Override ProcessingMode: setting response_trailer_mode SEND"); + msg->set_response_trailer_mode(ProcessingMode::SEND); + break; + case HeaderSendSetting::Skip: + ENVOY_LOG_MISC(trace, "Override ProcessingMode: setting response_trailer_mode SKIP"); + msg->set_response_trailer_mode(ProcessingMode::SKIP); + break; + default: + RELEASE_ASSERT(false, "HeaderSendSetting not handled"); + } + } +} + +void ExtProcFuzzHelper::randomizeResponse(ProcessingResponse* resp, const ProcessingRequest* req) { + // Each of the following switch cases generate random data for 1 of the 7 + // ProcessingResponse.response fields + switch (provider_->ConsumeEnum()) { + // 1. 
Randomize request_headers message + case ResponseType::RequestHeaders: { + ENVOY_LOG_MISC(trace, "ProcessingResponse setting request_headers response"); + CommonResponse* msg = resp->mutable_request_headers()->mutable_response(); + randomizeCommonResponse(msg, req); + break; + } + // 2. Randomize response_headers message + case ResponseType::ResponseHeaders: { + ENVOY_LOG_MISC(trace, "ProcessingResponse setting response_headers response"); + CommonResponse* msg = resp->mutable_response_headers()->mutable_response(); + randomizeCommonResponse(msg, req); + break; + } + // 3. Randomize request_body message + case ResponseType::RequestBody: { + ENVOY_LOG_MISC(trace, "ProcessingResponse setting request_body response"); + CommonResponse* msg = resp->mutable_request_body()->mutable_response(); + randomizeCommonResponse(msg, req); + break; + } + // 4. Randomize response_body message + case ResponseType::ResponseBody: { + ENVOY_LOG_MISC(trace, "ProcessingResponse setting response_body response"); + CommonResponse* msg = resp->mutable_response_body()->mutable_response(); + randomizeCommonResponse(msg, req); + break; + } + // 5. Randomize request_trailers message + case ResponseType::RequestTrailers: { + ENVOY_LOG_MISC(trace, "ProcessingResponse setting request_trailers response"); + HeaderMutation* header_mutation = resp->mutable_request_trailers()->mutable_header_mutation(); + randomizeHeaderMutation(header_mutation, req, true); + break; + } + // 6. Randomize response_trailers message + case ResponseType::ResponseTrailers: { + ENVOY_LOG_MISC(trace, "ProcessingResponse setting response_trailers response"); + HeaderMutation* header_mutation = resp->mutable_request_trailers()->mutable_header_mutation(); + randomizeHeaderMutation(header_mutation, req, true); + break; + } + // 7. 
Randomize immediate_response message + case ResponseType::ImmediateResponse: { + ENVOY_LOG_MISC(trace, "ProcessingResponse setting immediate_response response"); + ImmediateResponse* msg = resp->mutable_immediate_response(); + randomizeImmediateResponse(msg, req); + + // Since we are sending an immediate response, envoy will close the + // mock connection with the downstream. As a result, the + // codec_client_connection will be deleted and if the upstream is still + // sending data chunks (e.g., streaming mode) it will cause a crash + // Note: At this point provider_lock_ is not held so deadlock is not + // possible + + immediate_resp_lock_.lock(); + immediate_resp_sent_ = true; + immediate_resp_lock_.unlock(); + break; + } + default: + RELEASE_ASSERT(false, "ProcessingResponse Action not handled"); + } +} + +} // namespace ExternalProcessing +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz_helper.h b/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz_helper.h new file mode 100644 index 0000000000000..1b5a6359dea43 --- /dev/null +++ b/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz_helper.h @@ -0,0 +1,121 @@ +#pragma once + +#include "envoy/config/core/v3/base.pb.h" +#include "envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.pb.h" +#include "envoy/service/ext_proc/v3alpha/external_processor.pb.h" +#include "envoy/type/v3/http_status.pb.h" + +#include "source/common/common/thread.h" +#include "source/common/grpc/common.h" + +#include "test/common/http/common.h" +#include "test/fuzz/fuzz_runner.h" +#include "test/test_common/utility.h" + +#include "grpc++/server_builder.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace ExternalProcessing { + +using envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode; +using envoy::service::ext_proc::v3alpha::CommonResponse; +using 
envoy::service::ext_proc::v3alpha::HeaderMutation; +using envoy::service::ext_proc::v3alpha::ImmediateResponse; +using envoy::service::ext_proc::v3alpha::ProcessingRequest; +using envoy::service::ext_proc::v3alpha::ProcessingResponse; +using envoy::type::v3::StatusCode; + +const uint32_t ExtProcFuzzMaxDataSize = 1024; +const uint32_t ExtProcFuzzMaxStreamChunks = 50; + +// TODO(ikepolinsky): integrate an upstream that can be controlled by the fuzzer +// and responds appropriately to HTTP requests. +// Currently using autonomous upstream which sends 10 bytes in response to any +// HTTP message. This is an invalid response to TRACE, HEAD, and PUT requests +// so they are currently not supported. DELETE, PATCH, CONNECT, and OPTIONS +// use the same two send functions as GET and POST but with a different method value +// (e.g., they just use sendDownstreamRequest and sendDownstreamRequestWithBody) +// for simplicity I have excluded anything other than GET and POST for now. +// As more HTTP methods are added, update kMaxValue as appropriate to include +// the new enum as a fuzz choice +enum class HttpMethod { + GET, + POST, + DELETE, + PATCH, + CONNECT, + OPTIONS, + TRACE, + HEAD, + PUT, + kMaxValue = POST // NOLINT: FuzzedDataProvider requires lowercase k +}; + +enum class ResponseType { + RequestHeaders, + ResponseHeaders, + RequestBody, + ResponseBody, + ImmediateResponse, + RequestTrailers, + ResponseTrailers, + kMaxValue = ResponseTrailers // NOLINT: FuzzedDataProvider requires lowercase k +}; + +enum class HeaderSendSetting { + Default, + Send, + Skip, + kMaxValue = Skip // NOLINT: FuzzedDataProvider requires lowercase k +}; + +enum class BodySendSetting { + None, + Buffered, + Streamed, + BufferedPartial, + kMaxValue = BufferedPartial // NOLINT: FuzzedDataProvider requires lowercase k +}; + +enum class CommonResponseStatus { + Continue, + ContinueAndReplace, + kMaxValue = ContinueAndReplace // NOLINT: FuzzedDataProvider requires lowercase k +}; + +// Helper class 
for fuzzing the ext_proc filter. +// This class exposes functions for randomizing fields of ProcessingResponse +// messages and sub-messages. Further, this class exposes wrappers for +// FuzzedDataProvider functions enabling it to be used safely across multiple +// threads (e.g., in the fuzzer thread and the external processor thread). +class ExtProcFuzzHelper { +public: + ExtProcFuzzHelper(FuzzedDataProvider* provider); + + StatusCode randomHttpStatus(); + std::string consumeRepeatedString(); + grpc::StatusCode randomGrpcStatusCode(); + grpc::Status randomGrpcStatusWithMessage(); + + void logRequest(const ProcessingRequest* req); + void randomizeHeaderMutation(HeaderMutation* headers, const ProcessingRequest* req, + const bool trailers); + void randomizeCommonResponse(CommonResponse* msg, const ProcessingRequest* req); + void randomizeImmediateResponse(ImmediateResponse* msg, const ProcessingRequest* req); + void randomizeOverrideResponse(ProcessingMode* msg); + void randomizeResponse(ProcessingResponse* resp, const ProcessingRequest* req); + + FuzzedDataProvider* provider_; + + // Protects immediate_resp_sent_ + Thread::MutexBasicLockable immediate_resp_lock_; + // Flags if an immediate response was generated and sent + bool immediate_resp_sent_; +}; + +} // namespace ExternalProcessing +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/ext_proc/ext_proc_integration_test.cc b/test/extensions/filters/http/ext_proc/ext_proc_integration_test.cc index 4d22ca5dba855..eae609db2a7e7 100644 --- a/test/extensions/filters/http/ext_proc/ext_proc_integration_test.cc +++ b/test/extensions/filters/http/ext_proc/ext_proc_integration_test.cc @@ -476,13 +476,75 @@ TEST_P(ExtProcIntegrationTest, GetAndSetHeadersOnResponse) { auto* add1 = response_mutation->add_set_headers(); add1->mutable_header()->set_key("x-response-processed"); add1->mutable_header()->set_value("1"); + auto* add2 = 
response_mutation->add_set_headers(); + add2->mutable_header()->set_key(":status"); + add2->mutable_header()->set_value("201"); return true; }); + verifyDownstreamResponse(*response, 201); + EXPECT_THAT(response->headers(), SingleHeaderValueIs("x-response-processed", "1")); +} + +// Test the filter using the default configuration by connecting to +// an ext_proc server that responds to the response_headers message +// but tries to set the status code to an invalid value +TEST_P(ExtProcIntegrationTest, GetAndSetHeadersOnResponseBadStatus) { + initializeConfig(); + HttpIntegrationTest::initialize(); + auto response = sendDownstreamRequest(absl::nullopt); + processRequestHeadersMessage(true, absl::nullopt); + handleUpstreamRequest(); + + processResponseHeadersMessage(false, [](const HttpHeaders&, HeadersResponse& headers_resp) { + auto* response_mutation = headers_resp.mutable_response()->mutable_header_mutation(); + auto* add1 = response_mutation->add_set_headers(); + add1->mutable_header()->set_key("x-response-processed"); + add1->mutable_header()->set_value("1"); + auto* add2 = response_mutation->add_set_headers(); + add2->mutable_header()->set_key(":status"); + add2->mutable_header()->set_value("100"); + return true; + }); + + // Invalid status code should be ignored, but the other header mutation + // should still have been processed. verifyDownstreamResponse(*response, 200); EXPECT_THAT(response->headers(), SingleHeaderValueIs("x-response-processed", "1")); } +// Test the filter using the default configuration by connecting to +// an ext_proc server that responds to the response_headers message +// but tries to set the status code to two values. The second +// attempt should be ignored. 
+TEST_P(ExtProcIntegrationTest, GetAndSetHeadersOnResponseTwoStatuses) { + initializeConfig(); + HttpIntegrationTest::initialize(); + auto response = sendDownstreamRequest(absl::nullopt); + processRequestHeadersMessage(true, absl::nullopt); + handleUpstreamRequest(); + + processResponseHeadersMessage(false, [](const HttpHeaders&, HeadersResponse& headers_resp) { + auto* response_mutation = headers_resp.mutable_response()->mutable_header_mutation(); + auto* add1 = response_mutation->add_set_headers(); + add1->mutable_header()->set_key("x-response-processed"); + add1->mutable_header()->set_value("1"); + auto* add2 = response_mutation->add_set_headers(); + add2->mutable_header()->set_key(":status"); + add2->mutable_header()->set_value("201"); + auto* add3 = response_mutation->add_set_headers(); + add3->mutable_header()->set_key(":status"); + add3->mutable_header()->set_value("202"); + add3->mutable_append()->set_value(true); + return true; + }); + + // Invalid status code should be ignored, but the other header mutation + // should still have been processed. 
+ verifyDownstreamResponse(*response, 201); + EXPECT_THAT(response->headers(), SingleHeaderValueIs("x-response-processed", "1")); +} + // Test the filter using the default configuration by connecting to // an ext_proc server that responds to the response_headers message // by checking the headers and modifying the trailers @@ -789,7 +851,7 @@ TEST_P(ExtProcIntegrationTest, GetAndRespondImmediatelyOnResponse) { EXPECT_EQ("{\"reason\": \"Not authorized\"}", response->body()); } -// Test the filter with request body streaming enabled using +// Test the filter with request body buffering enabled using // an ext_proc server that responds to the request_body message // by sending back an immediate_response message TEST_P(ExtProcIntegrationTest, GetAndRespondImmediatelyOnRequestBody) { @@ -808,7 +870,7 @@ TEST_P(ExtProcIntegrationTest, GetAndRespondImmediatelyOnRequestBody) { EXPECT_EQ("{\"reason\": \"Not authorized\"}", response->body()); } -// Test the filter with body streaming enabled using +// Test the filter with body buffering enabled using // an ext_proc server that responds to the response_body message // by sending back an immediate_response message. Since this // happens after the response headers have been sent, as a result @@ -834,6 +896,23 @@ TEST_P(ExtProcIntegrationTest, GetAndRespondImmediatelyOnResponseBody) { EXPECT_EQ("{\"reason\": \"Not authorized\"}", response->body()); } +// Test the filter using an ext_proc server that responds to the request_body message +// by sending back an immediate_response message with an invalid status code. 
+TEST_P(ExtProcIntegrationTest, GetAndRespondImmediatelyWithBadStatus) { + initializeConfig(); + HttpIntegrationTest::initialize(); + auto response = sendDownstreamRequestWithBody("Replace this!", absl::nullopt); + processAndRespondImmediately(true, [](ImmediateResponse& immediate) { + immediate.mutable_status()->set_code(envoy::type::v3::StatusCode::Continue); + immediate.set_body("{\"reason\": \"Because\"}"); + immediate.set_details("Failed because we said so"); + }); + + // The attempt to set the status code to 100 should have been ignored. + verifyDownstreamResponse(*response, 200); + EXPECT_EQ("{\"reason\": \"Because\"}", response->body()); +} + // Test the ability of the filter to turn a GET into a POST by adding a body // and changing the method. TEST_P(ExtProcIntegrationTest, ConvertGetToPost) { @@ -1107,6 +1186,31 @@ TEST_P(ExtProcIntegrationTest, BufferBodyOverridePostWithEmptyBodyStreamed) { verifyDownstreamResponse(*response, 200); } +// Test how the filter responds when asked to stream a request body for a POST +// request with an empty body in "buffered partial" mode. We should get an empty body message +// because the Envoy filter stream received the body after all the headers. 
+TEST_P(ExtProcIntegrationTest, BufferBodyOverridePostWithEmptyBodyBufferedPartial) { + proto_config_.mutable_processing_mode()->set_request_body_mode(ProcessingMode::BUFFERED_PARTIAL); + initializeConfig(); + HttpIntegrationTest::initialize(); + auto response = sendDownstreamRequestWithBody("", absl::nullopt); + + processRequestHeadersMessage(true, [](const HttpHeaders& headers, HeadersResponse&) { + EXPECT_FALSE(headers.end_of_stream()); + return true; + }); + // We should get an empty body message this time + processRequestBodyMessage(false, [](const HttpBody& body, BodyResponse&) { + EXPECT_TRUE(body.end_of_stream()); + EXPECT_EQ(body.body().size(), 0); + return true; + }); + + handleUpstreamRequest(); + processResponseHeadersMessage(false, absl::nullopt); + verifyDownstreamResponse(*response, 200); +} + // Test how the filter responds when asked to buffer a request body for a GET // request with no body. We should receive no body message because the Envoy // filter stream received the headers and end simultaneously. @@ -1145,6 +1249,25 @@ TEST_P(ExtProcIntegrationTest, BufferBodyOverrideGetRequestNoBodyStreaming) { verifyDownstreamResponse(*response, 200); } +// Test how the filter responds when asked to stream a request body for a GET +// request with no body in "buffered partial" mode. We should receive no body message because the +// Envoy filter stream received the headers and end simultaneously. 
+TEST_P(ExtProcIntegrationTest, BufferBodyOverrideGetRequestNoBodyBufferedPartial) { + proto_config_.mutable_processing_mode()->set_request_body_mode(ProcessingMode::BUFFERED_PARTIAL); + initializeConfig(); + HttpIntegrationTest::initialize(); + auto response = sendDownstreamRequest(absl::nullopt); + + processRequestHeadersMessage(true, [](const HttpHeaders& headers, HeadersResponse&) { + EXPECT_TRUE(headers.end_of_stream()); + return true; + }); + // We should not see a request body message here + handleUpstreamRequest(); + processResponseHeadersMessage(false, absl::nullopt); + verifyDownstreamResponse(*response, 200); +} + // Test how the filter responds when asked to buffer a request body for a POST // request with a body. TEST_P(ExtProcIntegrationTest, BufferBodyOverridePostWithRequestBody) { diff --git a/test/extensions/filters/http/ext_proc/filter_test.cc b/test/extensions/filters/http/ext_proc/filter_test.cc index 54a3c1820288c..df872b98a3580 100644 --- a/test/extensions/filters/http/ext_proc/filter_test.cc +++ b/test/extensions/filters/http/ext_proc/filter_test.cc @@ -38,6 +38,7 @@ using Http::FilterHeadersStatus; using Http::FilterTrailersStatus; using Http::LowerCaseString; +using testing::AnyNumber; using testing::Eq; using testing::Invoke; using testing::ReturnRef; @@ -57,6 +58,19 @@ class HttpFilterTest : public testing::Test { EXPECT_CALL(*client_, start(_)).WillOnce(Invoke(this, &HttpFilterTest::doStart)); EXPECT_CALL(encoder_callbacks_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher_)); EXPECT_CALL(decoder_callbacks_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher_)); + EXPECT_CALL(dispatcher_, createTimer_(_)) + .Times(AnyNumber()) + .WillRepeatedly(Invoke([this](Unused) { + // Create a mock timer that we can check at destruction time to see if + // all timers were disabled no matter what. MockTimer has default + // actions that we just have to enable properly here. 
+ auto* timer = new Event::MockTimer(); + EXPECT_CALL(*timer, enableTimer(_, _)).Times(AnyNumber()); + EXPECT_CALL(*timer, disableTimer()).Times(AnyNumber()); + EXPECT_CALL(*timer, enabled()).Times(AnyNumber()); + timers_.push_back(timer); + return timer; + })); envoy::extensions::filters::http::ext_proc::v3alpha::ExternalProcessor proto_config{}; if (!yaml.empty()) { @@ -72,6 +86,15 @@ class HttpFilterTest : public testing::Test { request_headers_.setMethod("POST"); } + void TearDown() override { + for (auto* t : timers_) { + // This will fail if, at the end of the test, we left any timers enabled. + // (This particular test suite does not actually let timers expire, + // although other test suites do.) + EXPECT_FALSE(t->enabled_); + } + } + ExternalProcessorStreamPtr doStart(ExternalProcessorCallbacks& callbacks) { stream_callbacks_ = &callbacks; @@ -214,13 +237,14 @@ class HttpFilterTest : public testing::Test { NiceMock stats_store_; FilterConfigSharedPtr config_; std::unique_ptr filter_; - NiceMock dispatcher_; + testing::NiceMock dispatcher_; Http::MockStreamDecoderFilterCallbacks decoder_callbacks_; Http::MockStreamEncoderFilterCallbacks encoder_callbacks_; Http::TestRequestHeaderMapImpl request_headers_; Http::TestResponseHeaderMapImpl response_headers_; Http::TestRequestTrailerMapImpl request_trailers_; Http::TestResponseTrailerMapImpl response_trailers_; + std::vector timers_; }; // Using the default configuration, test the filter with a processor that @@ -838,17 +862,19 @@ TEST_F(HttpFilterTest, PostAndChangeBothBodiesBufferedMultiChunk) { EXPECT_EQ(1, config_->stats().streams_closed_.value()); } -// Using a configuration with streaming set for the request and -// response bodies, we should ignore a "buffered partial" body mode for now -// because it is not implemented. 
-TEST_F(HttpFilterTest, PostAndIgnoreStreamedBodiesUntilImplemented) { +// Using a configuration with partial buffering set for the request and +// response bodies but not the headers, and with each body +// delivered as set of chunks, test the filter with a processor that +// clears the request body and changes the response body. This works just +// like buffering because the bodies are much smaller than the buffer limit. +TEST_F(HttpFilterTest, PostAndChangeBothBodiesBufferedPartialMultiChunk) { initialize(R"EOF( grpc_service: envoy_grpc: cluster_name: "ext_proc_server" processing_mode: - request_header_mode: "SEND" - response_header_mode: "SEND" + request_header_mode: "SKIP" + response_header_mode: "SKIP" request_body_mode: "BUFFERED_PARTIAL" response_body_mode: "BUFFERED_PARTIAL" request_trailer_mode: "SKIP" @@ -859,35 +885,204 @@ TEST_F(HttpFilterTest, PostAndIgnoreStreamedBodiesUntilImplemented) { request_headers_.addCopy(LowerCaseString("content-type"), "text/plain"); request_headers_.addCopy(LowerCaseString("content-length"), 100); - EXPECT_EQ(FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, false)); - processRequestHeaders(false, absl::nullopt); + EXPECT_EQ(FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); + Buffer::OwnedImpl upstream_request_body; + EXPECT_CALL(decoder_callbacks_, decodingBuffer()).WillRepeatedly(Return(nullptr)); Buffer::OwnedImpl req_data; TestUtility::feedBufferWithRandomCharacters(req_data, 100); - EXPECT_EQ(FilterDataStatus::Continue, filter_->decodeData(req_data, true)); - EXPECT_EQ(FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); + Buffer::OwnedImpl empty_data; + EXPECT_EQ(FilterDataStatus::Continue, filter_->decodeData(req_data, false)); + upstream_request_body.move(req_data); + EXPECT_EQ(FilterDataStatus::StopIterationNoBuffer, filter_->decodeData(empty_data, true)); + upstream_request_body.move(empty_data); + + EXPECT_CALL(decoder_callbacks_, 
injectDecodedDataToFilterChain(_, false)) + .WillRepeatedly(Invoke([&upstream_request_body](Buffer::Instance& data, Unused) { + upstream_request_body.move(data); + })); + + processRequestBody([](const HttpBody& req_body, ProcessingResponse&, BodyResponse& body_resp) { + EXPECT_TRUE(req_body.end_of_stream()); + auto* body_mut = body_resp.mutable_response()->mutable_body_mutation(); + body_mut->set_body("Changed it!"); + }); + + // Expect that all those bodies ended up being the same as the thing we + // replaced it with. + EXPECT_EQ("Changed it!", upstream_request_body.toString()); response_headers_.addCopy(LowerCaseString(":status"), "200"); response_headers_.addCopy(LowerCaseString("content-type"), "text/plain"); - response_headers_.addCopy(LowerCaseString("content-length"), "100"); - EXPECT_EQ(FilterHeadersStatus::Continue, filter_->encode100ContinueHeaders(response_headers_)); - EXPECT_EQ(FilterHeadersStatus::StopIteration, filter_->encodeHeaders(response_headers_, false)); - processResponseHeaders(false, absl::nullopt); + EXPECT_EQ(FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false)); - Buffer::OwnedImpl resp_data; - TestUtility::feedBufferWithRandomCharacters(resp_data, 100); - EXPECT_EQ(FilterDataStatus::Continue, filter_->encodeData(resp_data, true)); + Buffer::OwnedImpl expected_response_body; + Buffer::OwnedImpl resp_data_1; + TestUtility::feedBufferWithRandomCharacters(resp_data_1, 100); + expected_response_body.add(resp_data_1.toString()); + Buffer::OwnedImpl resp_data_2; + TestUtility::feedBufferWithRandomCharacters(resp_data_2, 100); + expected_response_body.add(resp_data_2.toString()); + Buffer::OwnedImpl resp_data_3; + TestUtility::feedBufferWithRandomCharacters(resp_data_3, 100); + expected_response_body.add(resp_data_3.toString()); - EXPECT_EQ(FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_)); - filter_->onDestroy(); + Buffer::OwnedImpl downstream_response_body; + 
EXPECT_CALL(encoder_callbacks_, injectEncodedDataToFilterChain(_, false)) + .WillRepeatedly(Invoke([&downstream_response_body](Buffer::Instance& data, Unused) { + downstream_response_body.move(data); + })); + + EXPECT_EQ(FilterDataStatus::Continue, filter_->encodeData(resp_data_1, false)); + downstream_response_body.move(resp_data_1); + EXPECT_EQ(FilterDataStatus::Continue, filter_->encodeData(resp_data_2, false)); + downstream_response_body.move(resp_data_2); + EXPECT_EQ(FilterDataStatus::StopIterationNoBuffer, filter_->encodeData(resp_data_3, true)); + downstream_response_body.move(resp_data_3); + + processResponseBody( + [&expected_response_body](const HttpBody& req_body, ProcessingResponse&, BodyResponse&) { + EXPECT_TRUE(req_body.end_of_stream()); + EXPECT_EQ(expected_response_body.toString(), req_body.body()); + }); + // At this point, the whole thing should have been injected to the downstream + EXPECT_EQ(expected_response_body.toString(), downstream_response_body.toString()); + filter_->onDestroy(); EXPECT_EQ(1, config_->stats().streams_started_.value()); EXPECT_EQ(2, config_->stats().stream_msgs_sent_.value()); EXPECT_EQ(2, config_->stats().stream_msgs_received_.value()); EXPECT_EQ(1, config_->stats().streams_closed_.value()); } +// Using a configuration with partial buffering set for the request body, +// test the filter when all the data comes in before the headers callback +// response comes back. 
+TEST_F(HttpFilterTest, PostFastRequestPartialBuffering) { + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_proc_server" + processing_mode: + request_header_mode: "SEND" + response_header_mode: "SEND" + request_body_mode: "BUFFERED_PARTIAL" + response_body_mode: "NONE" + request_trailer_mode: "SKIP" + response_trailer_mode: "SKIP" + )EOF"); + + // Create synthetic HTTP request + request_headers_.addCopy(LowerCaseString("content-type"), "text/plain"); + request_headers_.addCopy(LowerCaseString("content-length"), 100); + + EXPECT_EQ(FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, false)); + + Buffer::OwnedImpl req_data_1("Hello"); + Buffer::OwnedImpl req_data_2(", World!"); + Buffer::OwnedImpl buffered_data; + setUpDecodingBuffering(buffered_data); + + // Buffering and callback isn't complete so we should watermark + EXPECT_CALL(decoder_callbacks_, onDecoderFilterAboveWriteBufferHighWatermark()); + EXPECT_EQ(FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(req_data_1, false)); + buffered_data.add(req_data_1); + EXPECT_EQ(FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(req_data_2, true)); + buffered_data.add(req_data_2); + + // Now the headers response comes in and we are all done + EXPECT_CALL(decoder_callbacks_, onDecoderFilterBelowWriteBufferLowWatermark()); + processRequestHeaders(true, absl::nullopt); + + processRequestBody([](const HttpBody& req_body, ProcessingResponse&, BodyResponse&) { + EXPECT_TRUE(req_body.end_of_stream()); + EXPECT_EQ("Hello, World!", req_body.body()); + }); + + response_headers_.addCopy(LowerCaseString(":status"), "200"); + response_headers_.addCopy(LowerCaseString("content-type"), "text/plain"); + response_headers_.addCopy(LowerCaseString("content-length"), "2"); + + EXPECT_EQ(FilterHeadersStatus::StopIteration, filter_->encodeHeaders(response_headers_, false)); + processResponseHeaders(false, absl::nullopt); + + Buffer::OwnedImpl resp_data; + 
resp_data.add("ok"); + EXPECT_EQ(FilterDataStatus::Continue, filter_->encodeData(resp_data, true)); + filter_->onDestroy(); +} + +// Using a configuration with partial buffering set for the request body, +// test the filter when the data that comes in before the headers callback +// completes has exceeded the buffer limit. +TEST_F(HttpFilterTest, PostFastAndBigRequestPartialBuffering) { + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_proc_server" + processing_mode: + request_header_mode: "SEND" + response_header_mode: "SEND" + request_body_mode: "BUFFERED_PARTIAL" + response_body_mode: "NONE" + request_trailer_mode: "SKIP" + response_trailer_mode: "SKIP" + )EOF"); + + // Create synthetic HTTP request + request_headers_.addCopy(LowerCaseString("content-type"), "text/plain"); + request_headers_.addCopy(LowerCaseString("content-length"), 13000); + + EXPECT_EQ(FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, false)); + + Buffer::OwnedImpl req_data_1; + TestUtility::feedBufferWithRandomCharacters(req_data_1, 5000); + Buffer::OwnedImpl req_data_2; + TestUtility::feedBufferWithRandomCharacters(req_data_2, 6000); + Buffer::OwnedImpl req_data_3; + TestUtility::feedBufferWithRandomCharacters(req_data_3, 2000); + Buffer::OwnedImpl buffered_data; + setUpDecodingBuffering(buffered_data); + Buffer::OwnedImpl expected_request_data; + expected_request_data.add(req_data_1); + expected_request_data.add(req_data_2); + + // Buffering and callback isn't complete so we should watermark + EXPECT_CALL(decoder_callbacks_, onDecoderFilterAboveWriteBufferHighWatermark()); + EXPECT_EQ(FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(req_data_1, false)); + buffered_data.add(req_data_1); + EXPECT_EQ(FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(req_data_2, false)); + buffered_data.add(req_data_2); + + // Now the headers response comes in. Since we are over the watermark we + // should send the callback. 
+ EXPECT_CALL(decoder_callbacks_, decoderBufferLimit()).WillRepeatedly(Return(10000)); + processRequestHeaders(true, absl::nullopt); + EXPECT_CALL(decoder_callbacks_, onDecoderFilterBelowWriteBufferLowWatermark()); + EXPECT_CALL(decoder_callbacks_, injectDecodedDataToFilterChain(_, false)); + processRequestBody( + [&expected_request_data](const HttpBody& req_body, ProcessingResponse&, BodyResponse&) { + EXPECT_FALSE(req_body.end_of_stream()); + EXPECT_EQ(expected_request_data.toString(), req_body.body()); + }); + + // The rest of the data should continue as normal. + EXPECT_EQ(FilterDataStatus::Continue, filter_->decodeData(req_data_3, true)); + + response_headers_.addCopy(LowerCaseString(":status"), "200"); + response_headers_.addCopy(LowerCaseString("content-type"), "text/plain"); + response_headers_.addCopy(LowerCaseString("content-length"), "2"); + + EXPECT_EQ(FilterHeadersStatus::StopIteration, filter_->encodeHeaders(response_headers_, false)); + processResponseHeaders(false, absl::nullopt); + + Buffer::OwnedImpl resp_data; + resp_data.add("ok"); + EXPECT_EQ(FilterDataStatus::Continue, filter_->encodeData(resp_data, true)); + filter_->onDestroy(); +} + // Using a configuration with streaming set for the request and // response bodies, ensure that the chunks are delivered to the processor and // that the processor gets them correctly. 
diff --git a/test/extensions/filters/http/ext_proc/mutation_utils_test.cc b/test/extensions/filters/http/ext_proc/mutation_utils_test.cc index abed79419dae1..c617caa299d59 100644 --- a/test/extensions/filters/http/ext_proc/mutation_utils_test.cc +++ b/test/extensions/filters/http/ext_proc/mutation_utils_test.cc @@ -66,6 +66,9 @@ TEST(MutationUtils, TestApplyMutations) { s->mutable_append()->set_value(false); s->mutable_header()->set_key("x-replace-this"); s->mutable_header()->set_value("no"); + s = mutation.add_set_headers(); + s->mutable_header()->set_key(":status"); + s->mutable_header()->set_value("418"); // Default of "append" is "false" and mutations // are applied in order. s = mutation.add_set_headers(); @@ -101,6 +104,15 @@ TEST(MutationUtils, TestApplyMutations) { s->mutable_header()->set_key("X-Envoy-StrangeThing"); s->mutable_header()->set_value("Yes"); + // Attempts to set the status header out of range should + // also be ignored. + s = mutation.add_set_headers(); + s->mutable_header()->set_key(":status"); + s->mutable_header()->set_value("This is not even an integer"); + s = mutation.add_set_headers(); + s->mutable_header()->set_key(":status"); + s->mutable_header()->set_value("100"); + MutationUtils::applyHeaderMutations(mutation, headers, false); Http::TestRequestHeaderMapImpl expected_headers{ @@ -109,6 +121,7 @@ TEST(MutationUtils, TestApplyMutations) { {":path", "/foo/the/bar?size=123"}, {"host", "localhost:1000"}, {":authority", "localhost:1000"}, + {":status", "418"}, {"content-type", "text/plain; encoding=UTF8"}, {"x-append-this", "1"}, {"x-append-this", "2"}, @@ -120,6 +133,35 @@ TEST(MutationUtils, TestApplyMutations) { EXPECT_THAT(&headers, HeaderMapEqualIgnoreOrder(&expected_headers)); } +TEST(MutationUtils, TestNonAppendableHeaders) { + Http::TestRequestHeaderMapImpl headers; + envoy::service::ext_proc::v3alpha::HeaderMutation mutation; + auto* s = mutation.add_set_headers(); + s->mutable_append()->set_value(true); + 
s->mutable_header()->set_key(":path"); + s->mutable_header()->set_value("/foo"); + s = mutation.add_set_headers(); + s->mutable_header()->set_key(":status"); + s->mutable_header()->set_value("400"); + // These two should be ignored since we ignore attempts + // to set multiple values for system headers. + s = mutation.add_set_headers(); + s->mutable_append()->set_value(true); + s->mutable_header()->set_key(":path"); + s->mutable_header()->set_value("/baz"); + s = mutation.add_set_headers(); + s->mutable_append()->set_value(true); + s->mutable_header()->set_key(":status"); + s->mutable_header()->set_value("401"); + + MutationUtils::applyHeaderMutations(mutation, headers, false); + Http::TestRequestHeaderMapImpl expected_headers{ + {":path", "/foo"}, + {":status", "400"}, + }; + EXPECT_THAT(&headers, HeaderMapEqualIgnoreOrder(&expected_headers)); +} + // Ensure that we actually replace the body TEST(MutationUtils, TestBodyMutationReplace) { Buffer::OwnedImpl buf; diff --git a/test/extensions/filters/http/ext_proc/ordering_test.cc b/test/extensions/filters/http/ext_proc/ordering_test.cc index d76da3dd8eec1..af3cf936c1111 100644 --- a/test/extensions/filters/http/ext_proc/ordering_test.cc +++ b/test/extensions/filters/http/ext_proc/ordering_test.cc @@ -32,6 +32,7 @@ using Http::FilterHeadersStatus; using Http::FilterTrailersStatus; using Http::LowerCaseString; +using testing::AnyNumber; using testing::Invoke; using testing::Return; using testing::ReturnRef; @@ -39,6 +40,9 @@ using testing::Unused; using namespace std::chrono_literals; +// The value to return for the decoder buffer limit. +static const uint32_t BufferSize = 100000; + // These tests directly drive the filter. They concentrate on testing out all the different // ordering options for the protocol, which means that unlike other tests they do not verify // every parameter sent to or from the filter. 
@@ -532,9 +536,14 @@ TEST_F(OrderingTest, AddRequestTrailers) { TEST_F(OrderingTest, ImmediateResponseOnRequest) { initialize(absl::nullopt); + // MockTimer constructor sets up expectations in the Dispatcher class to wire it up + MockTimer* request_timer = new MockTimer(&dispatcher_); + EXPECT_CALL(*request_timer, enableTimer(kMessageTimeout, nullptr)); + EXPECT_CALL(*request_timer, enabled()).Times(AnyNumber()); EXPECT_CALL(stream_delegate_, send(_, false)); sendRequestHeadersGet(true); EXPECT_CALL(encoder_callbacks_, sendLocalReply(Http::Code::InternalServerError, _, _, _, _)); + EXPECT_CALL(*request_timer, disableTimer()); sendImmediateResponse500(); // The rest of the filter isn't necessarily called after this. } @@ -543,14 +552,22 @@ TEST_F(OrderingTest, ImmediateResponseOnRequest) { TEST_F(OrderingTest, ImmediateResponseOnResponse) { initialize(absl::nullopt); + MockTimer* request_timer = new MockTimer(&dispatcher_); + EXPECT_CALL(*request_timer, enabled()).Times(AnyNumber()); + EXPECT_CALL(*request_timer, enableTimer(kMessageTimeout, nullptr)); EXPECT_CALL(stream_delegate_, send(_, false)); sendRequestHeadersGet(true); EXPECT_CALL(decoder_callbacks_, continueDecoding()); + EXPECT_CALL(*request_timer, disableTimer()); sendRequestHeadersReply(); + MockTimer* response_timer = new MockTimer(&dispatcher_); + EXPECT_CALL(*response_timer, enableTimer(kMessageTimeout, nullptr)); + EXPECT_CALL(*response_timer, enabled()).Times(AnyNumber()); EXPECT_CALL(stream_delegate_, send(_, false)); sendResponseHeaders(true); EXPECT_CALL(encoder_callbacks_, sendLocalReply(Http::Code::InternalServerError, _, _, _, _)); + EXPECT_CALL(*response_timer, disableTimer()); sendImmediateResponse500(); Buffer::OwnedImpl resp_body("Hello!"); EXPECT_EQ(FilterDataStatus::Continue, filter_->encodeData(resp_body, true)); @@ -887,6 +904,20 @@ TEST_F(FastFailOrderingTest, GrpcErrorOnStartRequestBody) { EXPECT_EQ(FilterDataStatus::StopIterationNoBuffer, filter_->decodeData(req_body, true)); } +// 
gRPC failure while opening stream with only request body enabled +TEST_F(FastFailOrderingTest, GrpcErrorOnStartRequestBodyBufferedPartial) { + initialize([](ExternalProcessor& cfg) { + auto* pm = cfg.mutable_processing_mode(); + pm->set_request_header_mode(ProcessingMode::SKIP); + pm->set_request_body_mode(ProcessingMode::BUFFERED_PARTIAL); + }); + EXPECT_CALL(decoder_callbacks_, decoderBufferLimit()).WillRepeatedly(Return(BufferSize)); + sendRequestHeadersPost(false); + Buffer::OwnedImpl req_body("Hello!"); + EXPECT_CALL(encoder_callbacks_, sendLocalReply(Http::Code::InternalServerError, _, _, _, _)); + EXPECT_EQ(FilterDataStatus::StopIterationNoBuffer, filter_->decodeData(req_body, true)); +} + // gRPC failure while opening stream with only request body enabled in streaming mode TEST_F(FastFailOrderingTest, GrpcErrorOnStartRequestBodyStreaming) { initialize([](ExternalProcessor& cfg) { @@ -922,6 +953,23 @@ TEST_F(FastFailOrderingTest, GrpcErrorIgnoredOnStartRequestBody) { EXPECT_EQ(FilterDataStatus::Continue, filter_->encodeData(resp_body, true)); } +// gRPC failure while opening stream with only request body enabled and errors ignored +TEST_F(FastFailOrderingTest, GrpcErrorIgnoredOnStartRequestBodyBufferedPartial) { + initialize([](ExternalProcessor& cfg) { + cfg.set_failure_mode_allow(true); + auto* pm = cfg.mutable_processing_mode(); + pm->set_request_header_mode(ProcessingMode::SKIP); + pm->set_request_body_mode(ProcessingMode::BUFFERED_PARTIAL); + }); + EXPECT_CALL(decoder_callbacks_, decoderBufferLimit()).WillRepeatedly(Return(BufferSize)); + sendRequestHeadersPost(false); + Buffer::OwnedImpl req_body("Hello!"); + EXPECT_EQ(FilterDataStatus::Continue, filter_->decodeData(req_body, true)); + sendResponseHeaders(false); + Buffer::OwnedImpl resp_body("Hello!"); + EXPECT_EQ(FilterDataStatus::Continue, filter_->encodeData(resp_body, true)); +} + // gRPC failure while opening stream with only request body enabled in streamed mode and errors // ignored 
TEST_F(FastFailOrderingTest, GrpcErrorIgnoredOnStartRequestBodyStreamed) { diff --git a/test/extensions/filters/http/ext_proc/state_test.cc b/test/extensions/filters/http/ext_proc/state_test.cc new file mode 100644 index 0000000000000..7a9371443b0e4 --- /dev/null +++ b/test/extensions/filters/http/ext_proc/state_test.cc @@ -0,0 +1,89 @@ +#include "source/extensions/filters/http/ext_proc/processor_state.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace ExternalProcessing { +namespace { + +TEST(StateTest, EmptyQueue) { + ChunkQueue queue; + EXPECT_TRUE(queue.empty()); + EXPECT_EQ(0, queue.bytesEnqueued()); + EXPECT_FALSE(queue.pop(false)); +} + +TEST(StateTest, BasicQueue) { + ChunkQueue queue; + Buffer::OwnedImpl data1("Hello"); + queue.push(data1, false, false); + EXPECT_FALSE(queue.empty()); + EXPECT_EQ(5, queue.bytesEnqueued()); + auto popped = queue.pop(false); + EXPECT_EQ((*popped)->data.toString(), "Hello"); + EXPECT_TRUE(queue.empty()); + EXPECT_EQ(0, queue.bytesEnqueued()); +} + +TEST(StateTest, EnqueueDequeue) { + ChunkQueue queue; + Buffer::OwnedImpl data1("Hello"); + queue.push(data1, false, false); + Buffer::OwnedImpl data2(", World!"); + queue.push(data2, false, false); + EXPECT_FALSE(queue.empty()); + EXPECT_EQ(13, queue.bytesEnqueued()); + auto popped = queue.pop(false); + EXPECT_EQ((*popped)->data.toString(), "Hello"); + EXPECT_FALSE(queue.empty()); + EXPECT_EQ(8, queue.bytesEnqueued()); + Buffer::OwnedImpl data3("Bye"); + EXPECT_FALSE(queue.empty()); + queue.push(data3, false, false); + EXPECT_EQ(11, queue.bytesEnqueued()); + popped = queue.pop(false); + EXPECT_EQ((*popped)->data.toString(), ", World!"); + popped = queue.pop(false); + EXPECT_EQ((*popped)->data.toString(), "Bye"); + popped = queue.pop(false); + EXPECT_FALSE(popped); + EXPECT_TRUE(queue.empty()); + EXPECT_EQ(0, queue.bytesEnqueued()); +} + +TEST(StateTest, ConsolidateThree) { + ChunkQueue queue; + 
Buffer::OwnedImpl data1("Hello"); + queue.push(data1, false, false); + Buffer::OwnedImpl data2(", "); + queue.push(data2, false, false); + Buffer::OwnedImpl data3("World!"); + queue.push(data3, false, false); + EXPECT_FALSE(queue.empty()); + EXPECT_EQ(13, queue.bytesEnqueued()); + const auto& chunk = queue.consolidate(true); + EXPECT_FALSE(queue.empty()); + EXPECT_EQ(13, queue.bytesEnqueued()); + EXPECT_EQ(chunk.data.toString(), "Hello, World!"); + EXPECT_TRUE(chunk.delivered); +} + +TEST(StateTest, ConsolidateOne) { + ChunkQueue queue; + Buffer::OwnedImpl data1("Hello"); + queue.push(data1, false, false); + const auto& chunk = queue.consolidate(true); + EXPECT_FALSE(queue.empty()); + EXPECT_EQ(5, queue.bytesEnqueued()); + EXPECT_EQ(chunk.data.toString(), "Hello"); + EXPECT_TRUE(chunk.delivered); +} + +} // namespace +} // namespace ExternalProcessing +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/ext_proc/streaming_integration_test.cc b/test/extensions/filters/http/ext_proc/streaming_integration_test.cc index 9451afb90565a..8bbc35672982c 100644 --- a/test/extensions/filters/http/ext_proc/streaming_integration_test.cc +++ b/test/extensions/filters/http/ext_proc/streaming_integration_test.cc @@ -57,7 +57,7 @@ class StreamingIntegrationTest : public HttpIntegrationTest, ->mutable_endpoint() ->mutable_address() ->mutable_socket_address(); - address->set_address("127.0.0.1"); + address->set_address(Network::Test::getLoopbackAddressString(ipVersion())); address->set_port_value(test_processor_.port()); // Ensure "HTTP2 with no prior knowledge." Necessary for gRPC. @@ -66,9 +66,9 @@ class StreamingIntegrationTest : public HttpIntegrationTest, ConfigHelper::setHttp2(*processor_cluster); // Make sure both flavors of gRPC client use the right address. 
- const auto addr = - std::make_shared("127.0.0.1", test_processor_.port()); - setGrpcService(*proto_config_.mutable_grpc_service(), "ext_proc_server", addr); + const auto addr = Network::Test::getCanonicalLoopbackAddress(ipVersion()); + const auto addr_port = Network::Utility::getAddressWithPort(*addr, test_processor_.port()); + setGrpcService(*proto_config_.mutable_grpc_service(), "ext_proc_server", addr_port); // Merge the filter. envoy::config::listener::v3::Filter ext_proc_filter; @@ -141,7 +141,7 @@ TEST_P(StreamingIntegrationTest, PostAndProcessHeadersOnly) { // This starts the gRPC server in the background. It'll be shut down when we stop the tests. test_processor_.start( - [](grpc::ServerReaderWriter* stream) { + ipVersion(), [](grpc::ServerReaderWriter* stream) { // This is the same gRPC stream processing code that a "user" of ext_proc // would write. In this case, we expect to receive a request_headers // message, and then close the stream. @@ -183,6 +183,7 @@ TEST_P(StreamingIntegrationTest, PostAndProcessBufferedRequestBody) { uint32_t total_size = num_chunks * chunk_size; test_processor_.start( + ipVersion(), [total_size](grpc::ServerReaderWriter* stream) { ProcessingRequest header_req; ASSERT_TRUE(stream->Read(&header_req)); @@ -200,7 +201,7 @@ TEST_P(StreamingIntegrationTest, PostAndProcessBufferedRequestBody) { EXPECT_EQ(body_req.request_body().body().size(), total_size); ProcessingResponse body_resp; - header_resp.mutable_request_body(); + body_resp.mutable_request_body(); stream->Write(body_resp); }); @@ -223,6 +224,7 @@ TEST_P(StreamingIntegrationTest, PostAndProcessStreamedRequestBody) { uint32_t total_size = num_chunks * chunk_size; test_processor_.start( + ipVersion(), [total_size](grpc::ServerReaderWriter* stream) { // Expect a request_headers message as the first message on the stream, // and send back an empty response. 
@@ -274,7 +276,7 @@ TEST_P(StreamingIntegrationTest, PostAndProcessStreamedRequestBodyPartially) { uint32_t total_size = num_chunks * chunk_size; test_processor_.start( - [](grpc::ServerReaderWriter* stream) { + ipVersion(), [](grpc::ServerReaderWriter* stream) { ProcessingRequest header_req; ASSERT_TRUE(stream->Read(&header_req)); ASSERT_TRUE(header_req.has_request_headers()); @@ -332,6 +334,7 @@ TEST_P(StreamingIntegrationTest, PostAndProcessStreamedRequestBodyAndClose) { uint32_t total_size = num_chunks * chunk_size; test_processor_.start( + ipVersion(), [total_size](grpc::ServerReaderWriter* stream) { ProcessingRequest header_req; ASSERT_TRUE(stream->Read(&header_req)); @@ -372,6 +375,7 @@ TEST_P(StreamingIntegrationTest, GetAndProcessBufferedResponseBody) { uint32_t response_size = 90000; test_processor_.start( + ipVersion(), [response_size](grpc::ServerReaderWriter* stream) { ProcessingRequest header_req; ASSERT_TRUE(stream->Read(&header_req)); @@ -409,8 +413,8 @@ TEST_P(StreamingIntegrationTest, GetAndProcessStreamedResponseBody) { uint32_t response_size = 170000; test_processor_.start( - [this, - response_size](grpc::ServerReaderWriter* stream) { + ipVersion(), [this, response_size]( + grpc::ServerReaderWriter* stream) { ProcessingRequest header_req; ASSERT_TRUE(stream->Read(&header_req)); ASSERT_TRUE(header_req.has_request_headers()); @@ -467,8 +471,8 @@ TEST_P(StreamingIntegrationTest, PostAndProcessStreamBothBodies) { uint32_t response_size = 1700000; test_processor_.start( - [this, request_size, - response_size](grpc::ServerReaderWriter* stream) { + ipVersion(), [this, request_size, response_size]( + grpc::ServerReaderWriter* stream) { ProcessingRequest header_req; ASSERT_TRUE(stream->Read(&header_req)); ASSERT_TRUE(header_req.has_request_headers()); @@ -554,7 +558,7 @@ TEST_P(StreamingIntegrationTest, PostAndStreamAndTransformBothBodies) { uint32_t response_size = 180000; test_processor_.start( - [](grpc::ServerReaderWriter* stream) { + ipVersion(), 
[](grpc::ServerReaderWriter* stream) { ProcessingRequest header_req; ASSERT_TRUE(stream->Read(&header_req)); ASSERT_TRUE(header_req.has_request_headers()); @@ -631,7 +635,7 @@ TEST_P(StreamingIntegrationTest, PostAndProcessBufferedRequestBodyTooBig) { uint32_t total_size = num_chunks * chunk_size; test_processor_.start( - [](grpc::ServerReaderWriter* stream) { + ipVersion(), [](grpc::ServerReaderWriter* stream) { ProcessingRequest header_req; ASSERT_TRUE(stream->Read(&header_req)); ASSERT_TRUE(header_req.has_request_headers()); @@ -659,6 +663,91 @@ TEST_P(StreamingIntegrationTest, PostAndProcessBufferedRequestBodyTooBig) { EXPECT_THAT(client_response_->headers(), Http::HttpStatusIs("413")); } +// Send a body that's smaller than the buffer limit, and have the processor +// request to see it in "buffered partial" form before allowing it to continue. +TEST_P(StreamingIntegrationTest, PostAndProcessBufferedPartialRequestBody) { + const uint32_t num_chunks = 99; + const uint32_t chunk_size = 1000; + uint32_t total_size = num_chunks * chunk_size; + + test_processor_.start( + ipVersion(), + [total_size](grpc::ServerReaderWriter* stream) { + ProcessingRequest header_req; + ASSERT_TRUE(stream->Read(&header_req)); + ASSERT_TRUE(header_req.has_request_headers()); + + ProcessingResponse header_resp; + header_resp.mutable_request_headers(); + auto* override = header_resp.mutable_mode_override(); + override->set_request_body_mode(ProcessingMode::BUFFERED_PARTIAL); + stream->Write(header_resp); + + ProcessingRequest body_req; + ASSERT_TRUE(stream->Read(&body_req)); + ASSERT_TRUE(body_req.has_request_body()); + EXPECT_TRUE(body_req.request_body().end_of_stream()); + EXPECT_EQ(body_req.request_body().body().size(), total_size); + + ProcessingResponse body_resp; + body_resp.mutable_request_body(); + stream->Write(body_resp); + }); + + initializeConfig(); + HttpIntegrationTest::initialize(); + sendPostRequest(num_chunks, chunk_size, [total_size](Http::HeaderMap& headers) { + 
headers.addCopy(LowerCaseString("expect_request_size_bytes"), total_size); + }); + + ASSERT_TRUE(client_response_->waitForEndStream()); + EXPECT_TRUE(client_response_->complete()); + EXPECT_THAT(client_response_->headers(), Http::HttpStatusIs("200")); +} + +// Send a body that's larger than the buffer limit, and have the processor +// request to see it in "buffered partial" form before allowing it to continue. +// The processor should see only part of the message. +TEST_P(StreamingIntegrationTest, PostAndProcessBufferedPartialBigRequestBody) { + const uint32_t num_chunks = 213; + const uint32_t chunk_size = 1000; + uint32_t total_size = num_chunks * chunk_size; + + test_processor_.start( + ipVersion(), + [total_size](grpc::ServerReaderWriter* stream) { + ProcessingRequest header_req; + ASSERT_TRUE(stream->Read(&header_req)); + ASSERT_TRUE(header_req.has_request_headers()); + + ProcessingResponse header_resp; + header_resp.mutable_request_headers(); + auto* override = header_resp.mutable_mode_override(); + override->set_request_body_mode(ProcessingMode::BUFFERED_PARTIAL); + stream->Write(header_resp); + + ProcessingRequest body_req; + ASSERT_TRUE(stream->Read(&body_req)); + ASSERT_TRUE(body_req.has_request_body()); + EXPECT_FALSE(body_req.request_body().end_of_stream()); + EXPECT_LT(body_req.request_body().body().size(), total_size); + + ProcessingResponse body_resp; + body_resp.mutable_request_body(); + stream->Write(body_resp); + }); + + initializeConfig(); + HttpIntegrationTest::initialize(); + sendPostRequest(num_chunks, chunk_size, [total_size](Http::HeaderMap& headers) { + headers.addCopy(LowerCaseString("expect_request_size_bytes"), total_size); + }); + + ASSERT_TRUE(client_response_->waitForEndStream()); + EXPECT_TRUE(client_response_->complete()); + EXPECT_THAT(client_response_->headers(), Http::HttpStatusIs("200")); +} + } // namespace ExternalProcessing } // namespace HttpFilters } // namespace Extensions diff --git 
a/test/extensions/filters/http/ext_proc/test_processor.cc b/test/extensions/filters/http/ext_proc/test_processor.cc index 735bfd707c1bf..47267ce8024fd 100644 --- a/test/extensions/filters/http/ext_proc/test_processor.cc +++ b/test/extensions/filters/http/ext_proc/test_processor.cc @@ -2,6 +2,9 @@ #include "envoy/service/ext_proc/v3alpha/external_processor.pb.h" +#include "test/test_common/network_utility.h" + +#include "absl/strings/str_format.h" #include "grpc++/server_builder.h" namespace Envoy { @@ -23,11 +26,13 @@ grpc::Status ProcessorWrapper::Process( return grpc::Status::OK; } -void TestProcessor::start(ProcessingFunc cb) { +void TestProcessor::start(const Network::Address::IpVersion ip_version, ProcessingFunc cb) { wrapper_ = std::make_unique(cb); grpc::ServerBuilder builder; builder.RegisterService(wrapper_.get()); - builder.AddListeningPort("127.0.0.1:0", grpc::InsecureServerCredentials(), &listening_port_); + builder.AddListeningPort( + absl::StrFormat("%s:0", Network::Test::getLoopbackAddressUrlString(ip_version)), + grpc::InsecureServerCredentials(), &listening_port_); server_ = builder.BuildAndStart(); } diff --git a/test/extensions/filters/http/ext_proc/test_processor.h b/test/extensions/filters/http/ext_proc/test_processor.h index b3b8ad2ed8bbf..17fae05c77fe1 100644 --- a/test/extensions/filters/http/ext_proc/test_processor.h +++ b/test/extensions/filters/http/ext_proc/test_processor.h @@ -3,6 +3,7 @@ #include #include +#include "envoy/network/address.h" #include "envoy/service/ext_proc/v3alpha/external_processor.grpc.pb.h" #include "envoy/service/ext_proc/v3alpha/external_processor.pb.h" @@ -41,10 +42,10 @@ class ProcessorWrapper : public envoy::service::ext_proc::v3alpha::ExternalProce // use ASSERT_ and EXPECT_ macros to validate test results. class TestProcessor { public: - // Start the processor listening on an ephemeral port (port 0) on 127.0.0.1. + // Start the processor listening on an ephemeral port (port 0) on the local host. 
// All new streams will be delegated to the specified function. The function // will be invoked in a background thread controlled by the gRPC server. - void start(ProcessingFunc cb); + void start(const Network::Address::IpVersion ip_version, ProcessingFunc cb); // Stop the processor from listening once all streams are closed, and exit // the listening threads. diff --git a/test/extensions/filters/http/fault/fault_filter_test.cc b/test/extensions/filters/http/fault/fault_filter_test.cc index b4add362fb609..9b6e1347c9e3e 100644 --- a/test/extensions/filters/http/fault/fault_filter_test.cc +++ b/test/extensions/filters/http/fault/fault_filter_test.cc @@ -147,9 +147,6 @@ class FaultFilterTest : public testing::Test { EXPECT_CALL(*timer_, disableTimer()); } - void TestPerFilterConfigFault(const Router::RouteSpecificFilterConfig* route_fault, - const Router::RouteSpecificFilterConfig* vhost_fault); - NiceMock stats_; FaultFilterConfigSharedPtr config_; std::unique_ptr filter_; @@ -1192,16 +1189,13 @@ TEST_F(FaultFilterTest, FaultWithTargetClusterNullRoute) { EXPECT_EQ(0UL, config_->stats().aborts_injected_.value()); } -void FaultFilterTest::TestPerFilterConfigFault( - const Router::RouteSpecificFilterConfig* route_fault, - const Router::RouteSpecificFilterConfig* vhost_fault) { +TEST_F(FaultFilterTest, RouteFaultOverridesListenerFault) { + setUpTest(v2_empty_fault_config_yaml); + Fault::FaultSettings delay_fault(convertYamlStrToProtoConfig(delay_with_upstream_cluster_yaml)); - ON_CALL(decoder_filter_callbacks_.route_->route_entry_, - perFilterConfig("envoy.filters.http.fault")) - .WillByDefault(Return(route_fault)); - ON_CALL(decoder_filter_callbacks_.route_->route_entry_.virtual_host_, - perFilterConfig("envoy.filters.http.fault")) - .WillByDefault(Return(vhost_fault)); + ON_CALL(*decoder_filter_callbacks_.route_, + mostSpecificPerFilterConfig("envoy.filters.http.fault")) + .WillByDefault(Return(&delay_fault)); const std::string upstream_cluster("www1"); @@ -1240,34 
+1234,6 @@ void FaultFilterTest::TestPerFilterConfigFault( EXPECT_EQ(0UL, config_->stats().aborts_injected_.value()); } -TEST_F(FaultFilterTest, RouteFaultOverridesListenerFault) { - - Fault::FaultSettings abort_fault(convertYamlStrToProtoConfig(abort_only_yaml)); - Fault::FaultSettings delay_fault(convertYamlStrToProtoConfig(delay_with_upstream_cluster_yaml)); - - // route-level fault overrides listener-level fault - { - setUpTest(v2_empty_fault_config_yaml); // This is a valid listener level fault - TestPerFilterConfigFault(&delay_fault, nullptr); - } - - // virtual-host-level fault overrides listener-level fault - { - config_->stats().aborts_injected_.reset(); - config_->stats().delays_injected_.reset(); - setUpTest(v2_empty_fault_config_yaml); - TestPerFilterConfigFault(nullptr, &delay_fault); - } - - // route-level fault overrides virtual-host-level fault - { - config_->stats().aborts_injected_.reset(); - config_->stats().delays_injected_.reset(); - setUpTest(v2_empty_fault_config_yaml); - TestPerFilterConfigFault(&delay_fault, &abort_fault); - } -} - class FaultFilterRateLimitTest : public FaultFilterTest { public: void setupRateLimitTest(bool enable_runtime) { diff --git a/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_integration_test.cc b/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_integration_test.cc index 6f8adcc40dda5..12e1b7b836d56 100644 --- a/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_integration_test.cc +++ b/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_integration_test.cc @@ -9,6 +9,7 @@ #include "test/test_common/utility.h" #include "absl/strings/match.h" +#include "fmt/printf.h" #include "gtest/gtest.h" using Envoy::Http::HeaderValueOf; @@ -26,17 +27,21 @@ class ReverseBridgeIntegrationTest : public testing::TestWithParam response_size_header) { setUpstreamProtocol(Http::CodecType::HTTP2); - const std::string filter = + const std::string filter = 
fmt::format( R"EOF( name: grpc_http1_reverse_bridge typed_config: "@type": type.googleapis.com/envoy.extensions.filters.http.grpc_http1_reverse_bridge.v3.FilterConfig content_type: application/x-protobuf withhold_grpc_frames: true - )EOF"; + response_size_header: "{}" + )EOF", + response_size_header ? *response_size_header : ""); config_helper_.addFilter(filter); auto vhost = config_helper_.createVirtualHost("disabled"); @@ -157,10 +162,11 @@ TEST_P(ReverseBridgeIntegrationTest, EnabledRoute) { EXPECT_TRUE(absl::EndsWith(response->body(), response_data.toString())); // Comparing strings embedded zero literals is hard. Use string literal and std::equal to avoid - // truncating the string when it's converted to const char *. - const auto expected_prefix = "\0\0\0\0\4"s; + // truncating the string when it's converted to const char *. Hex value 0x5 is 5, the message + // length. + const auto expected_prefix = "\0\0\0\0\x5"s; EXPECT_TRUE( - std::equal(response->body().begin(), response->body().begin() + 4, expected_prefix.begin())); + std::equal(response->body().begin(), response->body().begin() + 5, expected_prefix.begin())); EXPECT_THAT(response->headers(), HeaderValueOf(Http::Headers::get().ContentType, "application/grpc")); EXPECT_THAT(*response->trailers(), HeaderValueOf(Http::Headers::get().GrpcStatus, "0")); @@ -199,5 +205,123 @@ TEST_P(ReverseBridgeIntegrationTest, EnabledRouteBadContentType) { ASSERT_TRUE(fake_upstream_connection_->close()); ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); } + +// Verifies that we stream the response instead of buffering it, using an upstream-provided header +// to get the overall message length. 
+TEST_P(ReverseBridgeIntegrationTest, EnabledRouteStreamResponse) { + upstream_protocol_ = FakeHttpConnection::Type::HTTP1; + + initialize(absl::make_optional("custom-response-size-header")); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + Http::TestRequestHeaderMapImpl request_headers({{":scheme", "http"}, + {":method", "POST"}, + {":authority", "foo"}, + {":path", "/testing.ExampleService/Print"}, + {"content-type", "application/grpc"}}); + + auto response = codec_client_->makeRequestWithBody(request_headers, "abcdef"); + + // Wait for upstream to finish the request. + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); + + // Ensure that we stripped the length prefix and set the appropriate headers. + EXPECT_EQ("f", upstream_request_->body().toString()); + + EXPECT_THAT(upstream_request_->headers(), + HeaderValueOf(Http::Headers::get().ContentType, "application/x-protobuf")); + EXPECT_THAT(upstream_request_->headers(), + HeaderValueOf(Http::CustomHeaders::get().Accept, "application/x-protobuf")); + + // Respond to the request. + Http::TestResponseHeaderMapImpl response_headers; + response_headers.setStatus(200); + response_headers.setContentType("application/x-protobuf"); + const uint32_t upstream_response_size = 10; + response_headers.addCopy(Http::LowerCaseString("custom-response-size-header"), + std::to_string(upstream_response_size)); + upstream_request_->encodeHeaders(response_headers, false); + + Buffer::OwnedImpl response_data_begin{"hello"}; + upstream_request_->encodeData(response_data_begin, false); + // The downstream should be able to read this first piece of data before the stream ends. 
+ response->waitForBodyData(5); + + Buffer::OwnedImpl response_data_end{"world"}; + upstream_request_->encodeData(response_data_end, true); + + ASSERT_TRUE(response->waitForEndStream()); + EXPECT_TRUE(response->complete()); + response_data_begin.add(response_data_end); + + // Ensure that we restored the content-type and that we added the length prefix. + EXPECT_EQ(upstream_response_size + 5, response->body().size()); + EXPECT_TRUE(absl::EndsWith(response->body(), response_data_begin.toString())); + + // Comparing strings embedded zero literals is hard. Use string literal and std::equal to avoid + // truncating the string when it's converted to const char *. Hex value 0xA is 10, the message + // length. + const auto expected_prefix = "\0\0\0\0\xA"s; + EXPECT_TRUE( + std::equal(response->body().begin(), response->body().begin() + 5, expected_prefix.begin())); + EXPECT_THAT(response->headers(), + HeaderValueOf(Http::Headers::get().ContentType, "application/grpc")); + EXPECT_THAT(*response->trailers(), HeaderValueOf(Http::Headers::get().GrpcStatus, "0")); + + codec_client_->close(); + ASSERT_TRUE(fake_upstream_connection_->close()); + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); +} + +// Verifies that we can stream the response and proxy a reset. +TEST_P(ReverseBridgeIntegrationTest, EnabledRouteStreamWithholdResponse) { + upstream_protocol_ = FakeHttpConnection::Type::HTTP1; + + initialize(absl::make_optional("custom-response-size-header")); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + Http::TestRequestHeaderMapImpl request_headers({{":scheme", "http"}, + {":method", "POST"}, + {":authority", "foo"}, + {":path", "/testing.ExampleService/Print"}, + {"content-type", "application/grpc"}}); + + auto response = codec_client_->makeRequestWithBody(request_headers, "abcdef"); + + // Wait for upstream to finish the request. 
+ ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); + + // Ensure that we stripped the length prefix and set the appropriate headers. + EXPECT_EQ("f", upstream_request_->body().toString()); + + EXPECT_THAT(upstream_request_->headers(), + HeaderValueOf(Http::Headers::get().ContentType, "application/x-protobuf")); + EXPECT_THAT(upstream_request_->headers(), + HeaderValueOf(Http::CustomHeaders::get().Accept, "application/x-protobuf")); + + // Respond to the request. + Http::TestResponseHeaderMapImpl response_headers; + response_headers.setStatus(200); + response_headers.setContentType("application/x-protobuf"); + const uint32_t upstream_response_size = 10; + response_headers.addCopy(Http::LowerCaseString("custom-response-size-header"), + std::to_string(upstream_response_size)); + upstream_request_->encodeHeaders(response_headers, false); + + Buffer::OwnedImpl response_data{"helloworld"}; + upstream_request_->encodeData(response_data, false); + response->waitForBodyData(10); + + // If the upstream sends the full payload and also a reset, it should result in a reset rather + // than stream complete. 
+ upstream_request_->encodeResetStream(); + ASSERT_TRUE(response->waitForReset()); +} } // namespace } // namespace Envoy diff --git a/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_test.cc b/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_test.cc index c82c3b327ab6b..fa0199efee968 100644 --- a/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_test.cc +++ b/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_test.cc @@ -30,8 +30,9 @@ namespace { class ReverseBridgeTest : public testing::Test { protected: - void initialize(bool withhold_grpc_headers = true) { - filter_ = std::make_unique("application/x-protobuf", withhold_grpc_headers); + void initialize(bool withhold_grpc_headers = true, std::string custom_response_size_header = "") { + filter_ = std::make_unique("application/x-protobuf", withhold_grpc_headers, + custom_response_size_header); filter_->setDecoderFilterCallbacks(decoder_callbacks_); filter_->setEncoderFilterCallbacks(encoder_callbacks_); } @@ -614,8 +615,8 @@ TEST_F(ReverseBridgeTest, FilterConfigPerRouteDisabled) { filter_config_per_route.set_disabled(true); FilterConfigPerRoute filterConfigPerRoute(filter_config_per_route); - ON_CALL(decoder_callbacks_.route_->route_entry_, - perFilterConfig("envoy.filters.http.grpc_http1_reverse_bridge")) + ON_CALL(*decoder_callbacks_.route_, + mostSpecificPerFilterConfig("envoy.filters.http.grpc_http1_reverse_bridge")) .WillByDefault(testing::Return(&filterConfigPerRoute)); EXPECT_CALL(decoder_callbacks_, route()).Times(2); @@ -643,8 +644,8 @@ TEST_F(ReverseBridgeTest, FilterConfigPerRouteEnabled) { filter_config_per_route.set_disabled(false); FilterConfigPerRoute filterConfigPerRoute(filter_config_per_route); - ON_CALL(decoder_callbacks_.route_->route_entry_, - perFilterConfig("envoy.filters.http.grpc_http1_reverse_bridge")) + ON_CALL(*decoder_callbacks_.route_, + 
mostSpecificPerFilterConfig("envoy.filters.http.grpc_http1_reverse_bridge")) .WillByDefault(testing::Return(&filterConfigPerRoute)); { @@ -731,8 +732,8 @@ TEST_F(ReverseBridgeTest, RouteWithTrailers) { filter_config_per_route.set_disabled(false); FilterConfigPerRoute filterConfigPerRoute(filter_config_per_route); - ON_CALL(decoder_callbacks_.route_->route_entry_, - perFilterConfig("envoy.filters.http.grpc_http1_reverse_bridge")) + ON_CALL(*decoder_callbacks_.route_, + mostSpecificPerFilterConfig("envoy.filters.http.grpc_http1_reverse_bridge")) .WillByDefault(testing::Return(&filterConfigPerRoute)); { @@ -801,6 +802,198 @@ TEST_F(ReverseBridgeTest, RouteWithTrailers) { } } +// Verifies that the filter streams responses when it's configured to set the content length based +// on a header returned from the upstream. +TEST_F(ReverseBridgeTest, WithholdGrpcStreamResponse) { + initialize(true, "custom-content-length"); + decoder_callbacks_.is_grpc_request_ = true; + + { + EXPECT_CALL(decoder_callbacks_, route()).WillRepeatedly(testing::Return(nullptr)); + EXPECT_CALL(decoder_callbacks_, clearRouteCache()); + Http::TestRequestHeaderMapImpl headers({{"content-type", "application/grpc"}, + {"content-length", "25"}, + {":path", "/testing.ExampleService/SendData"}}); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); + + EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, "application/x-protobuf")); + EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentLength, "20")); + EXPECT_THAT(headers, + HeaderValueOf(Http::CustomHeaders::get().Accept, "application/x-protobuf")); + } + + { + // We should remove the first five bytes. + Envoy::Buffer::OwnedImpl buffer; + buffer.add("abcdefgh", 8); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, false)); + EXPECT_EQ("fgh", buffer.toString()); + } + + { + // Subsequent calls to decodeData should do nothing. 
+ Envoy::Buffer::OwnedImpl buffer; + buffer.add("abcdefgh", 8); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, false)); + EXPECT_EQ("abcdefgh", buffer.toString()); + } + + { + Http::TestRequestTrailerMapImpl trailers; + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers)); + } + + Http::TestResponseHeaderMapImpl headers( + {{":status", "200"}, + // This is the total length of the 2 buffers encoded separately below. + {"custom-content-length", "8"}, + {"content-type", "application/x-protobuf"}}); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, false)); + EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, "application/grpc")); + EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentLength, "13")); + + { + // The response data should be streamed to the client instead of buffered. Additionally, the + // first call should prefix the buffer with the size. + Envoy::Buffer::OwnedImpl buffer; + buffer.add("abc", 4); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(buffer, false)); + EXPECT_EQ(9, buffer.length()); + } + { + // The last call should insert the gRPC status into trailers. We've already sent the gRPC frame + // header, so the buffer should only contain the upstream response payload. + Http::TestResponseTrailerMapImpl trailers; + EXPECT_CALL(encoder_callbacks_, addEncodedTrailers()).WillOnce(ReturnRef(trailers)); + + Envoy::Buffer::OwnedImpl buffer; + buffer.add("ghj", 4); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(buffer, true)); + EXPECT_EQ(4, buffer.length()); + EXPECT_THAT(trailers, HeaderValueOf(Http::Headers::get().GrpcStatus, "0")); + } +} + +// Verifies that the filter returns a useful error message when it's configured to set the content +// length based on a header returned from the upstream that is missing. 
+TEST_F(ReverseBridgeTest, WithholdGrpcStreamResponseNoContentLength) { + initialize(true, "custom-content-length"); + decoder_callbacks_.is_grpc_request_ = true; + + { + EXPECT_CALL(decoder_callbacks_, route()).WillRepeatedly(testing::Return(nullptr)); + EXPECT_CALL(decoder_callbacks_, clearRouteCache()); + Http::TestRequestHeaderMapImpl headers( + {{"content-type", "application/grpc"}, {":path", "/testing.ExampleService/SendData"}}); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); + EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, "application/x-protobuf")); + EXPECT_THAT(headers, + HeaderValueOf(Http::CustomHeaders::get().Accept, "application/x-protobuf")); + } + + { + // We should remove the first five bytes. + Envoy::Buffer::OwnedImpl buffer; + buffer.add("abcdefgh", 8); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, false)); + EXPECT_EQ("fgh", buffer.toString()); + } + + { + // Subsequent calls to decodeData should do nothing. 
+ Envoy::Buffer::OwnedImpl buffer; + buffer.add("abcdefgh", 8); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, false)); + EXPECT_EQ("abcdefgh", buffer.toString()); + } + + Http::TestRequestTrailerMapImpl trailers; + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers)); + + Http::TestResponseHeaderMapImpl headers( + {{":status", "200"}, {"content-type", "application/x-protobuf"}}); + EXPECT_CALL( + decoder_callbacks_, + sendLocalReply( + Http::Code::OK, "envoy reverse bridge: upstream did not set content length", _, + absl::make_optional(static_cast(Grpc::Status::Internal)), _)); + EXPECT_CALL(decoder_callbacks_, encodeHeaders_(_, _)); + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->encodeHeaders(headers, false)); +} + +// Verifies that the filter returns a useful error message when it's configured to set the content +// length based on a header returned from the upstream that indicates the wrong content length. +TEST_F(ReverseBridgeTest, WithholdGrpcStreamResponseWrongContentLength) { + initialize(true, "custom-content-length"); + decoder_callbacks_.is_grpc_request_ = true; + + { + EXPECT_CALL(decoder_callbacks_, route()).WillRepeatedly(testing::Return(nullptr)); + EXPECT_CALL(decoder_callbacks_, clearRouteCache()); + Http::TestRequestHeaderMapImpl headers( + {{"content-type", "application/grpc"}, {":path", "/testing.ExampleService/SendData"}}); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); + EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, "application/x-protobuf")); + EXPECT_THAT(headers, + HeaderValueOf(Http::CustomHeaders::get().Accept, "application/x-protobuf")); + } + + { + // We should remove the first five bytes. 
+ Envoy::Buffer::OwnedImpl buffer; + buffer.add("abcdefgh", 8); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, false)); + EXPECT_EQ("fgh", buffer.toString()); + } + + { + // Subsequent calls to decodeData should do nothing. + Envoy::Buffer::OwnedImpl buffer; + buffer.add("abcdefgh", 8); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(buffer, false)); + EXPECT_EQ("abcdefgh", buffer.toString()); + } + + Http::TestRequestTrailerMapImpl trailers; + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers)); + + Http::TestResponseHeaderMapImpl headers( + {{":status", "200"}, + // This is not the correct size of the upstream response payload, since we only send 8 bytes + // before ending the stream below. + {"custom-content-length", "30"}, + {"content-type", "application/x-protobuf"}}); + + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(headers, false)); + EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentType, "application/grpc")); + EXPECT_THAT(headers, HeaderValueOf(Http::Headers::get().ContentLength, "35")); + + { + // The response data should be streamed to the client instead of buffered. Additionally, the + // first call should prefix the buffer with the size. + Envoy::Buffer::OwnedImpl buffer; + buffer.add("abc", 4); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(buffer, false)); + EXPECT_EQ(9, buffer.length()); + } + { + // The last call should insert the gRPC status into trailers. We've already sent the gRPC frame + // header, so the buffer should only contain the upstream response payload. 
+ Http::TestResponseTrailerMapImpl trailers; + EXPECT_CALL(encoder_callbacks_, addEncodedTrailers()).WillOnce(ReturnRef(trailers)); + + Envoy::Buffer::OwnedImpl buffer; + buffer.add("ghj", 4); + EXPECT_CALL( + encoder_callbacks_, + sendLocalReply( + Http::Code::OK, "envoy reverse bridge: upstream set incorrect content length", _, + absl::make_optional(static_cast(Grpc::Status::Internal)), _)); + EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter_->encodeData(buffer, true)); + EXPECT_EQ(4, buffer.length()); + EXPECT_THAT(trailers, HeaderValueOf(Http::Headers::get().GrpcStatus, "0")); + } +} } // namespace } // namespace GrpcHttp1ReverseBridge } // namespace HttpFilters diff --git a/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc b/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc index bc2e84d88f03a..e72327d13094b 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc +++ b/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc @@ -366,16 +366,6 @@ class GrpcJsonTranscoderFilterTest : public testing::Test, public GrpcJsonTransc return TestEnvironment::runfilesPath("test/proto/bookstore.descriptor"); } - void routeLocalConfig(const Router::RouteSpecificFilterConfig* route_settings, - const Router::RouteSpecificFilterConfig* vhost_settings) { - ON_CALL(decoder_callbacks_.route_->route_entry_, - perFilterConfig("envoy.filters.http.grpc_json_transcoder")) - .WillByDefault(Return(route_settings)); - ON_CALL(decoder_callbacks_.route_->route_entry_.virtual_host_, - perFilterConfig("envoy.filters.http.grpc_json_transcoder")) - .WillByDefault(Return(vhost_settings)); - } - // TODO(lizan): Add a mock of JsonTranscoderConfig and test more error cases. 
JsonTranscoderConfig config_; JsonTranscoderFilter filter_; @@ -402,18 +392,10 @@ TEST_F(GrpcJsonTranscoderFilterTest, PerRouteDisabledConfigOverride) { envoy::extensions::filters::http::grpc_json_transcoder::v3::GrpcJsonTranscoder route_cfg; route_cfg.set_proto_descriptor_bin(""); JsonTranscoderConfig route_config(route_cfg, *api_); - routeLocalConfig(&route_config, nullptr); - - Http::TestRequestHeaderMapImpl headers; - EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(headers, false)); -} - -TEST_F(GrpcJsonTranscoderFilterTest, PerVHostDisabledConfigOverride) { - envoy::extensions::filters::http::grpc_json_transcoder::v3::GrpcJsonTranscoder vhost_cfg; - vhost_cfg.set_proto_descriptor_bin(""); - JsonTranscoderConfig vhost_config(vhost_cfg, *api_); - routeLocalConfig(nullptr, &vhost_config); + ON_CALL(*decoder_callbacks_.route_, + mostSpecificPerFilterConfig("envoy.filters.http.grpc_json_transcoder")) + .WillByDefault(Return(&route_config)); Http::TestRequestHeaderMapImpl headers; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(headers, false)); } @@ -1507,25 +1489,10 @@ TEST_F(GrpcJsonTranscoderDisabledFilterTest, PerRouteEnabledOverride) { envoy::extensions::filters::http::grpc_json_transcoder::v3::GrpcJsonTranscoder route_cfg = bookstoreProtoConfig(); JsonTranscoderConfig route_config(route_cfg, *api_); - routeLocalConfig(&route_config, nullptr); - - Http::TestRequestHeaderMapImpl request_headers{ - {"content-type", "application/json"}, {":method", "POST"}, {":path", "/shelf"}}; - EXPECT_CALL(decoder_callbacks_, clearRouteCache()); - - EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(request_headers, false)); - EXPECT_EQ("application/grpc", request_headers.get_("content-type")); - EXPECT_EQ("/shelf", request_headers.get_("x-envoy-original-path")); - EXPECT_EQ("/bookstore.Bookstore/CreateShelf", request_headers.get_(":path")); - EXPECT_EQ("trailers", request_headers.get_("te")); -} - 
-TEST_F(GrpcJsonTranscoderDisabledFilterTest, PerVhostEnabledOverride) { - envoy::extensions::filters::http::grpc_json_transcoder::v3::GrpcJsonTranscoder vhost_cfg = - bookstoreProtoConfig(); - JsonTranscoderConfig vhost_config(vhost_cfg, *api_); - routeLocalConfig(nullptr, &vhost_config); + ON_CALL(*decoder_callbacks_.route_, + mostSpecificPerFilterConfig("envoy.filters.http.grpc_json_transcoder")) + .WillByDefault(Return(&route_config)); Http::TestRequestHeaderMapImpl request_headers{ {"content-type", "application/json"}, {":method", "POST"}, {":path", "/shelf"}}; diff --git a/test/extensions/filters/http/header_to_metadata/header_to_metadata_filter_test.cc b/test/extensions/filters/http/header_to_metadata/header_to_metadata_filter_test.cc index e22828878fe08..02c5d56f6e8ec 100644 --- a/test/extensions/filters/http/header_to_metadata/header_to_metadata_filter_test.cc +++ b/test/extensions/filters/http/header_to_metadata/header_to_metadata_filter_test.cc @@ -135,8 +135,8 @@ TEST_F(HeaderToMetadataTest, PerRouteOverride) { envoy::extensions::filters::http::header_to_metadata::v3::Config config_proto; TestUtility::loadFromYaml(request_config_yaml, config_proto); Config per_route_config(config_proto, true); - EXPECT_CALL(decoder_callbacks_.route_->route_entry_.virtual_host_, - perFilterConfig("envoy.filters.http.header_to_metadata")) + EXPECT_CALL(*decoder_callbacks_.route_, + mostSpecificPerFilterConfig("envoy.filters.http.header_to_metadata")) .WillOnce(Return(&per_route_config)); EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(req_info_)); @@ -161,8 +161,8 @@ TEST_F(HeaderToMetadataTest, ConfigIsCached) { envoy::extensions::filters::http::header_to_metadata::v3::Config config_proto; TestUtility::loadFromYaml(request_config_yaml, config_proto); Config per_route_config(config_proto, true); - EXPECT_CALL(decoder_callbacks_.route_->route_entry_.virtual_host_, - perFilterConfig("envoy.filters.http.header_to_metadata")) + 
EXPECT_CALL(*decoder_callbacks_.route_, + mostSpecificPerFilterConfig("envoy.filters.http.header_to_metadata")) .WillOnce(Return(&per_route_config)); EXPECT_TRUE(getConfig()->doRequest()); diff --git a/test/extensions/filters/http/ip_tagging/ip_tagging_filter_test.cc b/test/extensions/filters/http/ip_tagging/ip_tagging_filter_test.cc index a19ac094a9a82..5c7c666af5dec 100644 --- a/test/extensions/filters/http/ip_tagging/ip_tagging_filter_test.cc +++ b/test/extensions/filters/http/ip_tagging/ip_tagging_filter_test.cc @@ -65,7 +65,8 @@ TEST_F(IpTaggingFilterTest, InternalRequest) { Network::Address::InstanceConstSharedPtr remote_address = Network::Utility::parseInternetAddress("1.2.3.5"); - filter_callbacks_.stream_info_.downstream_address_provider_->setRemoteAddress(remote_address); + filter_callbacks_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( + remote_address); EXPECT_CALL(stats_, counter("prefix.ip_tagging.internal_request.hit")); EXPECT_CALL(stats_, counter("prefix.ip_tagging.total")); @@ -100,7 +101,8 @@ request_type: external Network::Address::InstanceConstSharedPtr remote_address = Network::Utility::parseInternetAddress("1.2.3.4"); - filter_callbacks_.stream_info_.downstream_address_provider_->setRemoteAddress(remote_address); + filter_callbacks_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( + remote_address); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false)); EXPECT_EQ("external_request", request_headers.get_(Http::Headers::get().EnvoyIpTags)); @@ -137,14 +139,16 @@ request_type: both Network::Address::InstanceConstSharedPtr remote_address = Network::Utility::parseInternetAddress("1.2.3.5"); - filter_callbacks_.stream_info_.downstream_address_provider_->setRemoteAddress(remote_address); + filter_callbacks_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( + remote_address); EXPECT_EQ(Http::FilterHeadersStatus::Continue, 
filter_->decodeHeaders(request_headers, false)); EXPECT_EQ("internal_request", request_headers.get_(Http::Headers::get().EnvoyIpTags)); request_headers = Http::TestRequestHeaderMapImpl{}; remote_address = Network::Utility::parseInternetAddress("1.2.3.4"); - filter_callbacks_.stream_info_.downstream_address_provider_->setRemoteAddress(remote_address); + filter_callbacks_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( + remote_address); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false)); EXPECT_EQ("external_request", request_headers.get_(Http::Headers::get().EnvoyIpTags)); @@ -156,7 +160,8 @@ TEST_F(IpTaggingFilterTest, NoHits) { Network::Address::InstanceConstSharedPtr remote_address = Network::Utility::parseInternetAddress("10.2.3.5"); - filter_callbacks_.stream_info_.downstream_address_provider_->setRemoteAddress(remote_address); + filter_callbacks_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( + remote_address); EXPECT_CALL(stats_, counter("prefix.ip_tagging.no_hit")); EXPECT_CALL(stats_, counter("prefix.ip_tagging.total")); @@ -176,7 +181,8 @@ TEST_F(IpTaggingFilterTest, AppendEntry) { Network::Address::InstanceConstSharedPtr remote_address = Network::Utility::parseInternetAddress("1.2.3.5"); - filter_callbacks_.stream_info_.downstream_address_provider_->setRemoteAddress(remote_address); + filter_callbacks_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( + remote_address); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false)); EXPECT_EQ("test,internal_request", request_headers.get_(Http::Headers::get().EnvoyIpTags)); @@ -204,7 +210,8 @@ request_type: both Network::Address::InstanceConstSharedPtr remote_address = Network::Utility::parseInternetAddress("1.2.3.4"); - filter_callbacks_.stream_info_.downstream_address_provider_->setRemoteAddress(remote_address); + 
filter_callbacks_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( + remote_address); EXPECT_CALL(stats_, counter("prefix.ip_tagging.total")); EXPECT_CALL(stats_, counter("prefix.ip_tagging.internal_request.hit")); @@ -235,7 +242,8 @@ TEST_F(IpTaggingFilterTest, Ipv6Address) { Network::Address::InstanceConstSharedPtr remote_address = Network::Utility::parseInternetAddress("2001:abcd:ef01:2345::1"); - filter_callbacks_.stream_info_.downstream_address_provider_->setRemoteAddress(remote_address); + filter_callbacks_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( + remote_address); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false)); EXPECT_EQ("ipv6_request", request_headers.get_(Http::Headers::get().EnvoyIpTags)); @@ -264,7 +272,8 @@ TEST_F(IpTaggingFilterTest, ClearRouteCache) { Network::Address::InstanceConstSharedPtr remote_address = Network::Utility::parseInternetAddress("1.2.3.5"); - filter_callbacks_.stream_info_.downstream_address_provider_->setRemoteAddress(remote_address); + filter_callbacks_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( + remote_address); EXPECT_CALL(filter_callbacks_, clearRouteCache()); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, false)); diff --git a/test/extensions/filters/http/jwt_authn/extractor_test.cc b/test/extensions/filters/http/jwt_authn/extractor_test.cc index b7dee6776f559..2adaf35e7e463 100644 --- a/test/extensions/filters/http/jwt_authn/extractor_test.cc +++ b/test/extensions/filters/http/jwt_authn/extractor_test.cc @@ -54,6 +54,15 @@ const char ExampleConfig[] = R"( from_headers: - name: prefix-header value_prefix: '"CCCDDD"' + provider9: + issuer: issuer9 + from_cookies: + - token-cookie + - token-cookie-2 + provider10: + issuer: issuer10 + from_cookies: + - token-cookie-3 )"; class ExtractorTest : public testing::Test { @@ -265,6 +274,30 @@ TEST_F(ExtractorTest, 
TestCustomParamToken) { tokens[0]->removeJwt(headers); } +// Test extracting token from a cookie +TEST_F(ExtractorTest, TestCookieToken) { + auto headers = TestRequestHeaderMapImpl{ + {"cookie", "token-cookie=token-cookie-value; token-cookie-2=token-cookie-value-2"}, + {"cookie", "token-cookie-3=\"token-cookie-value-3\""}}; + auto tokens = extractor_->extract(headers); + EXPECT_EQ(tokens.size(), 3); + + // only issuer9 has specified "token-cookie" cookie location. + EXPECT_EQ(tokens[0]->token(), "token-cookie-value"); + EXPECT_TRUE(tokens[0]->isIssuerAllowed("issuer9")); + EXPECT_FALSE(tokens[0]->isIssuerAllowed("issuer10")); + + // only issuer9 has specified "token-cookie-2" cookie location. + EXPECT_EQ(tokens[1]->token(), "token-cookie-value-2"); + EXPECT_TRUE(tokens[1]->isIssuerAllowed("issuer9")); + EXPECT_FALSE(tokens[1]->isIssuerAllowed("issuer10")); + + // only issuer10 has specified "token-cookie-3" cookie location. + EXPECT_EQ(tokens[2]->token(), "token-cookie-value-3"); + EXPECT_TRUE(tokens[2]->isIssuerAllowed("issuer10")); + EXPECT_FALSE(tokens[2]->isIssuerAllowed("issuer9")); +} + // Test extracting multiple tokens. TEST_F(ExtractorTest, TestMultipleTokens) { auto headers = TestRequestHeaderMapImpl{ diff --git a/test/extensions/filters/http/jwt_authn/filter_test.cc b/test/extensions/filters/http/jwt_authn/filter_test.cc index 325a37e51b10b..6afa8ac3e8ec4 100644 --- a/test/extensions/filters/http/jwt_authn/filter_test.cc +++ b/test/extensions/filters/http/jwt_authn/filter_test.cc @@ -326,31 +326,11 @@ TEST_F(FilterTest, TestNoRoute) { EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers_)); } -// Test if routeEntry() return null, fallback to call config config. -TEST_F(FilterTest, TestNoRouteEnty) { - EXPECT_CALL(filter_callbacks_, route()).WillOnce(Return(mock_route_)); - // routeEntry() call return nullptr - EXPECT_CALL(*mock_route_, routeEntry()).WillOnce(Return(nullptr)); - - // Calling the findVerifier from filter config. 
- EXPECT_CALL(*mock_config_.get(), findVerifier(_, _)).WillOnce(Return(nullptr)); - // findPerRouteVerifier is not called. - EXPECT_CALL(*mock_config_.get(), findPerRouteVerifier(_)).Times(0); - - auto headers = Http::TestRequestHeaderMapImpl{}; - EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); - EXPECT_EQ(1U, mock_config_->stats().allowed_.value()); - - Buffer::OwnedImpl data(""); - EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data, false)); - EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(trailers_)); -} - // Test if no per-route config, fallback to call config config. TEST_F(FilterTest, TestNoPerRouteConfig) { EXPECT_CALL(filter_callbacks_, route()).WillOnce(Return(mock_route_)); // perFilterConfig return nullptr. - EXPECT_CALL(mock_route_->route_entry_, perFilterConfig("envoy.filters.http.jwt_authn")) + EXPECT_CALL(*mock_route_, mostSpecificPerFilterConfig("envoy.filters.http.jwt_authn")) .WillOnce(Return(nullptr)); // Calling the findVerifier from filter config. @@ -370,7 +350,7 @@ TEST_F(FilterTest, TestNoPerRouteConfig) { // Test bypass requirement from per-route config TEST_F(FilterTest, TestPerRouteBypass) { EXPECT_CALL(filter_callbacks_, route()).WillOnce(Return(mock_route_)); - EXPECT_CALL(mock_route_->route_entry_, perFilterConfig("envoy.filters.http.jwt_authn")) + EXPECT_CALL(*mock_route_, mostSpecificPerFilterConfig("envoy.filters.http.jwt_authn")) .WillOnce(Return(per_route_config_.get())); // findVerifier is not called. 
@@ -391,7 +371,7 @@ TEST_F(FilterTest, TestPerRouteBypass) { // Test per-route config with wrong requirement_name TEST_F(FilterTest, TestPerRouteWrongRequirementName) { EXPECT_CALL(filter_callbacks_, route()).WillOnce(Return(mock_route_)); - EXPECT_CALL(mock_route_->route_entry_, perFilterConfig("envoy.filters.http.jwt_authn")) + EXPECT_CALL(*mock_route_, mostSpecificPerFilterConfig("envoy.filters.http.jwt_authn")) .WillOnce(Return(per_route_config_.get())); // findVerifier is not called. @@ -415,7 +395,7 @@ TEST_F(FilterTest, TestPerRouteWrongRequirementName) { // Test verifier from per-route config TEST_F(FilterTest, TestPerRouteVerifierOK) { EXPECT_CALL(filter_callbacks_, route()).WillOnce(Return(mock_route_)); - EXPECT_CALL(mock_route_->route_entry_, perFilterConfig("envoy.filters.http.jwt_authn")) + EXPECT_CALL(*mock_route_, mostSpecificPerFilterConfig("envoy.filters.http.jwt_authn")) .WillOnce(Return(per_route_config_.get())); // findVerifier is not called. diff --git a/test/extensions/filters/http/kill_request/kill_request_filter_test.cc b/test/extensions/filters/http/kill_request/kill_request_filter_test.cc index e804788142b64..b6fde6589e2a3 100644 --- a/test/extensions/filters/http/kill_request/kill_request_filter_test.cc +++ b/test/extensions/filters/http/kill_request/kill_request_filter_test.cc @@ -99,8 +99,8 @@ TEST_F(KillRequestFilterTest, KillRequestEnabledFromRouteLevelConfiguration) { KillSettings kill_settings = KillSettings(route_level_kill_request); ON_CALL(random_generator_, random()).WillByDefault(Return(0)); - ON_CALL(decoder_filter_callbacks_.route_->route_entry_, - perFilterConfig("envoy.filters.http.kill_request")) + ON_CALL(*decoder_filter_callbacks_.route_, + mostSpecificPerFilterConfig("envoy.filters.http.kill_request")) .WillByDefault(Return(&kill_settings)); EXPECT_DEATH(filter_->decodeHeaders(request_headers_, false), ""); } @@ -114,8 +114,8 @@ TEST_F(KillRequestFilterTest, KillRequestDisabledRouteLevelConfiguration) { 
request_headers_.addCopy("x-envoy-kill-request", "true"); ON_CALL(random_generator_, random()).WillByDefault(Return(0)); - ON_CALL(decoder_filter_callbacks_.route_->route_entry_, - perFilterConfig("envoy.filters.http.kill_request")) + ON_CALL(*decoder_filter_callbacks_.route_, + mostSpecificPerFilterConfig("envoy.filters.http.kill_request")) .WillByDefault(Return(nullptr)); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); } @@ -219,8 +219,8 @@ TEST_F(KillRequestFilterTest, PerRouteKillSettingFound) { // Return valid kill setting on the REQUEST direction const KillSettings kill_settings(route_level_kill_request); - ON_CALL(decoder_filter_callbacks_.route_->route_entry_, - perFilterConfig("envoy.filters.http.kill_request")) + ON_CALL(*decoder_filter_callbacks_.route_, + mostSpecificPerFilterConfig("envoy.filters.http.kill_request")) .WillByDefault(Return(&kill_settings)); ASSERT_EQ(filter_->decodeHeaders(request_headers_, false), Http::FilterHeadersStatus::Continue); } diff --git a/test/extensions/filters/http/lua/BUILD b/test/extensions/filters/http/lua/BUILD index f19e5db07370a..1ed127f9a5dc8 100644 --- a/test/extensions/filters/http/lua/BUILD +++ b/test/extensions/filters/http/lua/BUILD @@ -52,6 +52,7 @@ envoy_extension_cc_test( extension_names = ["envoy.filters.http.lua"], deps = [ "//source/extensions/filters/http/lua:config", + "//test/config:v2_link_hacks", "//test/integration:http_integration_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", diff --git a/test/extensions/filters/http/lua/lua_filter_test.cc b/test/extensions/filters/http/lua/lua_filter_test.cc index 82d1623ff02cd..a7ae58a4f2003 100644 --- a/test/extensions/filters/http/lua/lua_filter_test.cc +++ b/test/extensions/filters/http/lua/lua_filter_test.cc @@ -114,8 +114,7 @@ class LuaHttpFilterTest : public testing::Test { void setupMetadata(const std::string& yaml) { TestUtility::loadFromYaml(yaml, metadata_); 
- EXPECT_CALL(decoder_callbacks_.route_->route_entry_, metadata()) - .WillOnce(testing::ReturnRef(metadata_)); + ON_CALL(*decoder_callbacks_.route_, metadata()).WillByDefault(testing::ReturnRef(metadata_)); } NiceMock server_factory_context_; @@ -1795,7 +1794,7 @@ TEST_F(LuaHttpFilterTest, GetRequestedServerName) { EXPECT_CALL(decoder_callbacks_, streamInfo()).WillOnce(ReturnRef(stream_info_)); absl::string_view server_name = "foo.example.com"; - stream_info_.downstream_address_provider_->setRequestedServerName(server_name); + stream_info_.downstream_connection_info_provider_->setRequestedServerName(server_name); Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("foo.example.com"))); @@ -1913,7 +1912,7 @@ TEST_F(LuaHttpFilterTest, InspectStreamInfoDowstreamSslConnection) { const auto connection_info = std::make_shared(); EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(stream_info_)); - EXPECT_CALL(stream_info_, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info_.downstream_connection_info_provider_->setSslConnection(connection_info); EXPECT_CALL(*connection_info, peerCertificatePresented()).WillOnce(Return(true)); EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("peerCertificatePresented"))); @@ -2011,7 +2010,7 @@ TEST_F(LuaHttpFilterTest, InspectStreamInfoDowstreamSslConnectionOnPlainConnecti setup(SCRIPT); EXPECT_CALL(decoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(stream_info_)); - EXPECT_CALL(stream_info_, downstreamSslConnection()).WillRepeatedly(Return(nullptr)); + stream_info_.downstream_connection_info_provider_->setSslConnection(nullptr); EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("downstreamSslConnection is nil"))); @@ -2034,7 +2033,7 @@ TEST_F(LuaHttpFilterTest, SurviveMultipleDownstreamSslConnectionCalls) { const auto connection_info = std::make_shared(); EXPECT_CALL(decoder_callbacks_, 
streamInfo()).WillRepeatedly(ReturnRef(stream_info_)); - EXPECT_CALL(stream_info_, downstreamSslConnection()).WillRepeatedly(Return(connection_info)); + stream_info_.downstream_connection_info_provider_->setSslConnection(connection_info); for (uint64_t i = 0; i < 200; i++) { EXPECT_CALL(*filter_, @@ -2185,7 +2184,7 @@ TEST_F(LuaHttpFilterTest, LuaFilterDisabled) { EXPECT_CALL(decoder_callbacks_, clearRouteCache()); - ON_CALL(decoder_callbacks_.route_->route_entry_, perFilterConfig("envoy.filters.http.lua")) + ON_CALL(*decoder_callbacks_.route_, mostSpecificPerFilterConfig("envoy.filters.http.lua")) .WillByDefault(Return(nullptr)); Http::TestRequestHeaderMapImpl request_headers_1{{":path", "/"}}; @@ -2193,7 +2192,7 @@ TEST_F(LuaHttpFilterTest, LuaFilterDisabled) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_1, true)); EXPECT_EQ("world", request_headers_1.get_("hello")); - ON_CALL(decoder_callbacks_.route_->route_entry_, perFilterConfig("envoy.filters.http.lua")) + ON_CALL(*decoder_callbacks_.route_, mostSpecificPerFilterConfig("envoy.filters.http.lua")) .WillByDefault(Return(per_route_config_.get())); Http::TestRequestHeaderMapImpl request_headers_2{{":path", "/"}}; @@ -2229,7 +2228,7 @@ TEST_F(LuaHttpFilterTest, LuaFilterRefSourceCodes) { setupConfig(proto_config, per_route_proto_config); setupFilter(); - ON_CALL(decoder_callbacks_.route_->route_entry_, perFilterConfig("envoy.filters.http.lua")) + ON_CALL(*decoder_callbacks_.route_, mostSpecificPerFilterConfig("envoy.filters.http.lua")) .WillByDefault(Return(per_route_config_.get())); Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; @@ -2258,7 +2257,7 @@ TEST_F(LuaHttpFilterTest, LuaFilterRefSourceCodeNotExist) { setupConfig(proto_config, per_route_proto_config); setupFilter(); - ON_CALL(decoder_callbacks_.route_->route_entry_, perFilterConfig("envoy.filters.http.lua")) + ON_CALL(*decoder_callbacks_.route_, 
mostSpecificPerFilterConfig("envoy.filters.http.lua")) .WillByDefault(Return(per_route_config_.get())); Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; diff --git a/test/extensions/filters/http/lua/lua_integration_test.cc b/test/extensions/filters/http/lua/lua_integration_test.cc index 90bd7d44c0208..a1ec02924953c 100644 --- a/test/extensions/filters/http/lua/lua_integration_test.cc +++ b/test/extensions/filters/http/lua/lua_integration_test.cc @@ -1,6 +1,7 @@ #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" +#include "test/config/v2_link_hacks.h" #include "test/integration/http_integration.h" #include "test/test_common/utility.h" @@ -77,9 +78,7 @@ class LuaIntegrationTest : public testing::TestWithParamstartRequest(default_headers); + auto response = std::move(encoder_decoder.second); + + ASSERT_TRUE(response->waitForEndStream()); + ASSERT_TRUE(response->complete()); + + EXPECT_EQ("bar", + response->headers().get(Http::LowerCaseString("foo"))[0]->value().getStringView()); + + EXPECT_EQ("200", response->headers().getStatusValue()); + EXPECT_EQ("hello", response->body()); + + cleanup(); +} + // Test whether Rds can correctly deliver LuaPerRoute configuration. 
TEST_P(LuaIntegrationTest, RdsTestOfLuaPerRoute) { // When the route configuration is updated dynamically via RDS and the configuration contains an diff --git a/test/extensions/filters/http/lua/wrappers_test.cc b/test/extensions/filters/http/lua/wrappers_test.cc index 7b7b940149062..546073baa67ed 100644 --- a/test/extensions/filters/http/lua/wrappers_test.cc +++ b/test/extensions/filters/http/lua/wrappers_test.cc @@ -289,8 +289,9 @@ TEST_F(LuaStreamInfoWrapperTest, ReturnCurrentDownstreamAddresses) { new Network::Address::Ipv4Instance("127.0.0.1", 8000)}; auto downstream_direct_remote = Network::Address::InstanceConstSharedPtr{new Network::Address::Ipv4Instance("8.8.8.8", 3000)}; - stream_info.downstream_address_provider_->setLocalAddress(address); - stream_info.downstream_address_provider_->setDirectRemoteAddressForTest(downstream_direct_remote); + stream_info.downstream_connection_info_provider_->setLocalAddress(address); + stream_info.downstream_connection_info_provider_->setDirectRemoteAddressForTest( + downstream_direct_remote); Filters::Common::Lua::LuaDeathRef wrapper( StreamInfoWrapper::create(coroutine_->luaState(), stream_info), true); EXPECT_CALL(printer_, testPrint(address->asString())); @@ -310,7 +311,7 @@ TEST_F(LuaStreamInfoWrapperTest, ReturnRequestedServerName) { setup(SCRIPT); NiceMock stream_info; - stream_info.downstream_address_provider_->setRequestedServerName("some.sni.io"); + stream_info.downstream_connection_info_provider_->setRequestedServerName("some.sni.io"); Filters::Common::Lua::LuaDeathRef wrapper( StreamInfoWrapper::create(coroutine_->luaState(), stream_info), true); EXPECT_CALL(printer_, testPrint("some.sni.io")); diff --git a/test/extensions/filters/http/original_src/original_src_test.cc b/test/extensions/filters/http/original_src/original_src_test.cc index c8b9d1c5c5983..5839baa88b9e5 100644 --- a/test/extensions/filters/http/original_src/original_src_test.cc +++ b/test/extensions/filters/http/original_src/original_src_test.cc @@ 
-49,7 +49,7 @@ class OriginalSrcHttpTest : public testing::Test { } void setAddressToReturn(const std::string& address) { - callbacks_.stream_info_.downstream_address_provider_->setRemoteAddress( + callbacks_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( Network::Utility::resolveUrl(address)); } @@ -94,8 +94,8 @@ TEST_F(OriginalSrcHttpTest, DecodeHeadersIpv4AddressAddsOption) { for (const auto& option : *options) { option->setOption(socket, envoy::config::core::v3::SocketOption::STATE_PREBIND); } - EXPECT_EQ(*socket.addressProvider().localAddress(), - *callbacks_.stream_info_.downstream_address_provider_->remoteAddress()); + EXPECT_EQ(*socket.connectionInfoProvider().localAddress(), + *callbacks_.stream_info_.downstream_connection_info_provider_->remoteAddress()); } TEST_F(OriginalSrcHttpTest, DecodeHeadersIpv4AddressUsesCorrectAddress) { @@ -129,7 +129,7 @@ TEST_F(OriginalSrcHttpTest, DecodeHeadersIpv4AddressBleachesPort) { for (const auto& option : *options) { option->setOption(socket, envoy::config::core::v3::SocketOption::STATE_PREBIND); } - EXPECT_EQ(*socket.addressProvider().localAddress(), *expected_address); + EXPECT_EQ(*socket.connectionInfoProvider().localAddress(), *expected_address); } TEST_F(OriginalSrcHttpTest, FilterAddsTransparentOption) { @@ -200,7 +200,7 @@ TEST_F(OriginalSrcHttpTest, TrailersAndDataEndStreamDoNothing) { // This will be invoked in decodeHeaders. EXPECT_CALL(callbacks, addUpstreamSocketOptions(_)); EXPECT_CALL(callbacks, streamInfo()); - callbacks.stream_info_.downstream_address_provider_->setRemoteAddress( + callbacks.stream_info_.downstream_connection_info_provider_->setRemoteAddress( Network::Utility::parseInternetAddress("1.2.3.4")); filter->decodeHeaders(headers_, true); @@ -217,7 +217,7 @@ TEST_F(OriginalSrcHttpTest, TrailersAndDataNotEndStreamDoNothing) { // This will be invoked in decodeHeaders. 
EXPECT_CALL(callbacks, addUpstreamSocketOptions(_)); EXPECT_CALL(callbacks, streamInfo()); - callbacks.stream_info_.downstream_address_provider_->setRemoteAddress( + callbacks.stream_info_.downstream_connection_info_provider_->setRemoteAddress( Network::Utility::parseInternetAddress("1.2.3.4")); filter->decodeHeaders(headers_, false); diff --git a/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc b/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc index 932475d2dcb45..a20b2d9054d60 100644 --- a/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc +++ b/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc @@ -22,15 +22,12 @@ namespace Envoy { namespace { // Tests Ratelimit functionality with config in filter. -class RatelimitIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, +class RatelimitIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, public HttpIntegrationTest { public: RatelimitIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, ipVersion()) {} - void SetUp() override { - XDS_DEPRECATED_FEATURE_TEST_SKIP; - initialize(); - } + void SetUp() override { initialize(); } void createUpstreams() override { setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); @@ -46,9 +43,6 @@ class RatelimitIntegrationTest : public Grpc::VersionedGrpcClientIntegrationPara } void initialize() override { - if (apiVersion() != envoy::config::core::v3::ApiVersion::V3) { - config_helper_.enableDeprecatedV2Api(); - } config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { auto* ratelimit_cluster = bootstrap.mutable_static_resources()->add_clusters(); ratelimit_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]); @@ -62,7 +56,8 @@ class RatelimitIntegrationTest : public Grpc::VersionedGrpcClientIntegrationPara proto_config_.set_disable_x_envoy_ratelimited_header(disable_x_envoy_ratelimited_header_); 
setGrpcService(*proto_config_.mutable_rate_limit_service()->mutable_grpc_service(), "ratelimit", fake_upstreams_.back()->localAddress()); - proto_config_.mutable_rate_limit_service()->set_transport_api_version(apiVersion()); + proto_config_.mutable_rate_limit_service()->set_transport_api_version( + envoy::config::core::v3::ApiVersion::V3); envoy::config::listener::v3::Filter ratelimit_filter; ratelimit_filter.set_name("envoy.filters.http.ratelimit"); @@ -111,8 +106,7 @@ class RatelimitIntegrationTest : public Grpc::VersionedGrpcClientIntegrationPara result = ratelimit_requests_[i]->waitForEndStream(*dispatcher_); RELEASE_ASSERT(result, result.message()); EXPECT_EQ("POST", ratelimit_requests_[i]->headers().getMethodValue()); - EXPECT_EQ(TestUtility::getVersionedMethodPath("envoy.service.ratelimit.{}.RateLimitService", - "ShouldRateLimit", apiVersion()), + EXPECT_EQ("/envoy.service.ratelimit.v3.RateLimitService/ShouldRateLimit", ratelimit_requests_[i]->headers().getPathValue()); EXPECT_EQ("application/grpc", ratelimit_requests_[i]->headers().getContentTypeValue()); @@ -238,26 +232,22 @@ class RatelimitFilterEnvoyRatelimitedHeaderDisabledIntegrationTest }; INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, RatelimitIntegrationTest, - VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS, - Grpc::VersionedGrpcClientIntegrationParamTest::protocolTestParamsToString); + GRPC_CLIENT_INTEGRATION_PARAMS, + Grpc::GrpcClientIntegrationParamTest::protocolTestParamsToString); INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, RatelimitFailureModeIntegrationTest, - VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS, - Grpc::VersionedGrpcClientIntegrationParamTest::protocolTestParamsToString); + GRPC_CLIENT_INTEGRATION_PARAMS, + Grpc::GrpcClientIntegrationParamTest::protocolTestParamsToString); INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, RatelimitFilterHeadersEnabledIntegrationTest, - VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS, - Grpc::VersionedGrpcClientIntegrationParamTest::protocolTestParamsToString); + 
GRPC_CLIENT_INTEGRATION_PARAMS, + Grpc::GrpcClientIntegrationParamTest::protocolTestParamsToString); INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, RatelimitFilterEnvoyRatelimitedHeaderDisabledIntegrationTest, - VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS, - Grpc::VersionedGrpcClientIntegrationParamTest::protocolTestParamsToString); + GRPC_CLIENT_INTEGRATION_PARAMS, + Grpc::GrpcClientIntegrationParamTest::protocolTestParamsToString); -TEST_P(RatelimitIntegrationTest, Ok) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; - basicFlow(); -} +TEST_P(RatelimitIntegrationTest, Ok) { basicFlow(); } TEST_P(RatelimitIntegrationTest, OkWithHeaders) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; initiateClientConnection(); waitForRatelimitRequest(); Http::TestResponseHeaderMapImpl ratelimit_response_headers{{"x-ratelimit-limit", "1000"}, @@ -282,7 +272,6 @@ TEST_P(RatelimitIntegrationTest, OkWithHeaders) { } TEST_P(RatelimitIntegrationTest, OverLimit) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; initiateClientConnection(); waitForRatelimitRequest(); sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::OVER_LIMIT, {}, @@ -301,7 +290,6 @@ TEST_P(RatelimitIntegrationTest, OverLimit) { } TEST_P(RatelimitIntegrationTest, OverLimitWithHeaders) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; initiateClientConnection(); waitForRatelimitRequest(); Http::TestResponseHeaderMapImpl ratelimit_response_headers{ @@ -329,7 +317,6 @@ TEST_P(RatelimitIntegrationTest, OverLimitWithHeaders) { } TEST_P(RatelimitIntegrationTest, Error) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; initiateClientConnection(); waitForRatelimitRequest(); ratelimit_requests_[0]->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "404"}}, true); @@ -344,7 +331,6 @@ TEST_P(RatelimitIntegrationTest, Error) { } TEST_P(RatelimitIntegrationTest, Timeout) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; initiateClientConnection(); waitForRatelimitRequest(); switch (clientType()) { @@ -368,7 +354,6 @@ TEST_P(RatelimitIntegrationTest, Timeout) { } 
TEST_P(RatelimitIntegrationTest, ConnectImmediateDisconnect) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; initiateClientConnection(); ASSERT_TRUE(fake_upstreams_[1]->waitForHttpConnection(*dispatcher_, fake_ratelimit_connection_)); ASSERT_TRUE(fake_ratelimit_connection_->close()); @@ -380,7 +365,6 @@ TEST_P(RatelimitIntegrationTest, ConnectImmediateDisconnect) { } TEST_P(RatelimitIntegrationTest, FailedConnect) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; // Do not reset the fake upstream for the ratelimiter, but have it stop listening. // If we reset, the Envoy will continue to send H2 to the original rate limiter port, which may // be used by another test, and data sent to that port "unexpectedly" will cause problems for @@ -393,7 +377,6 @@ TEST_P(RatelimitIntegrationTest, FailedConnect) { } TEST_P(RatelimitFailureModeIntegrationTest, ErrorWithFailureModeOff) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; initiateClientConnection(); waitForRatelimitRequest(); ratelimit_requests_[0]->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "503"}}, true); @@ -408,7 +391,6 @@ TEST_P(RatelimitFailureModeIntegrationTest, ErrorWithFailureModeOff) { } TEST_P(RatelimitFilterHeadersEnabledIntegrationTest, OkWithFilterHeaders) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; initiateClientConnection(); waitForRatelimitRequest(); @@ -444,7 +426,6 @@ TEST_P(RatelimitFilterHeadersEnabledIntegrationTest, OkWithFilterHeaders) { } TEST_P(RatelimitFilterHeadersEnabledIntegrationTest, OverLimitWithFilterHeaders) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; initiateClientConnection(); waitForRatelimitRequest(); @@ -482,7 +463,6 @@ TEST_P(RatelimitFilterHeadersEnabledIntegrationTest, OverLimitWithFilterHeaders) TEST_P(RatelimitFilterEnvoyRatelimitedHeaderDisabledIntegrationTest, OverLimitWithoutEnvoyRatelimitedHeader) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; initiateClientConnection(); waitForRatelimitRequest(); sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::OVER_LIMIT, {}, @@ -500,7 +480,6 @@ 
TEST_P(RatelimitFilterEnvoyRatelimitedHeaderDisabledIntegrationTest, } TEST_P(RatelimitIntegrationTest, OverLimitAndOK) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; const int num_requests = 4; setNumRequests(num_requests); diff --git a/test/extensions/filters/http/ratelimit/ratelimit_test.cc b/test/extensions/filters/http/ratelimit/ratelimit_test.cc index 1b11162b0789c..5c63045c7d870 100644 --- a/test/extensions/filters/http/ratelimit/ratelimit_test.cc +++ b/test/extensions/filters/http/ratelimit/ratelimit_test.cc @@ -1102,8 +1102,8 @@ TEST_F(HttpRateLimitFilterTest, DEPRECATED_FEATURE_TEST(ExcludeVirtualHost)) { EXPECT_CALL(filter_callbacks_.route_->route_entry_, includeVirtualHostRateLimits()) .WillOnce(Return(false)); - EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_, - perFilterConfig("envoy.filters.http.ratelimit")) + EXPECT_CALL(*filter_callbacks_.route_, + mostSpecificPerFilterConfig("envoy.filters.http.ratelimit")) .WillOnce(Return(&per_route_config_)); EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, empty()) @@ -1153,8 +1153,8 @@ TEST_F(HttpRateLimitFilterTest, OverrideVHRateLimitOptionWithRouteRateLimitSet) EXPECT_CALL(filter_callbacks_.route_->route_entry_, includeVirtualHostRateLimits()) .WillOnce(Return(false)); - EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_, - perFilterConfig("envoy.filters.http.ratelimit")) + EXPECT_CALL(*filter_callbacks_.route_, + mostSpecificPerFilterConfig("envoy.filters.http.ratelimit")) .WillOnce(Return(&per_route_config_)); EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, empty()) @@ -1202,8 +1202,8 @@ TEST_F(HttpRateLimitFilterTest, OverrideVHRateLimitOptionWithoutRouteRateLimit) EXPECT_CALL(filter_callbacks_.route_->route_entry_, includeVirtualHostRateLimits()) .WillOnce(Return(false)); - EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_, - perFilterConfig("envoy.filters.http.ratelimit")) + EXPECT_CALL(*filter_callbacks_.route_, + 
mostSpecificPerFilterConfig("envoy.filters.http.ratelimit")) .WillOnce(Return(&per_route_config_)); EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, empty()) @@ -1253,8 +1253,8 @@ TEST_F(HttpRateLimitFilterTest, IncludeVHRateLimitOptionWithOnlyVHRateLimitSet) EXPECT_CALL(filter_callbacks_.route_->route_entry_, includeVirtualHostRateLimits()) .WillOnce(Return(false)); - EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_, - perFilterConfig("envoy.filters.http.ratelimit")) + EXPECT_CALL(*filter_callbacks_.route_, + mostSpecificPerFilterConfig("envoy.filters.http.ratelimit")) .WillOnce(Return(&per_route_config_)); EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_.rate_limit_policy_, @@ -1303,8 +1303,8 @@ TEST_F(HttpRateLimitFilterTest, IncludeVHRateLimitOptionWithRouteAndVHRateLimitS EXPECT_CALL(filter_callbacks_.route_->route_entry_, includeVirtualHostRateLimits()) .WillOnce(Return(false)); - EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_, - perFilterConfig("envoy.filters.http.ratelimit")) + EXPECT_CALL(*filter_callbacks_.route_, + mostSpecificPerFilterConfig("envoy.filters.http.ratelimit")) .WillOnce(Return(&per_route_config_)); EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_.rate_limit_policy_, @@ -1353,8 +1353,8 @@ TEST_F(HttpRateLimitFilterTest, IgnoreVHRateLimitOptionWithRouteRateLimitSet) { EXPECT_CALL(filter_callbacks_.route_->route_entry_, includeVirtualHostRateLimits()) .WillOnce(Return(false)); - EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_, - perFilterConfig("envoy.filters.http.ratelimit")) + EXPECT_CALL(*filter_callbacks_.route_, + mostSpecificPerFilterConfig("envoy.filters.http.ratelimit")) .WillOnce(Return(&per_route_config_)); EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_.rate_limit_policy_, @@ -1399,8 +1399,8 @@ TEST_F(HttpRateLimitFilterTest, IgnoreVHRateLimitOptionWithOutRouteRateLimit) { 
EXPECT_CALL(filter_callbacks_.route_->route_entry_, includeVirtualHostRateLimits()) .WillOnce(Return(false)); - EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_, - perFilterConfig("envoy.filters.http.ratelimit")) + EXPECT_CALL(*filter_callbacks_.route_, + mostSpecificPerFilterConfig("envoy.filters.http.ratelimit")) .WillOnce(Return(&per_route_config_)); EXPECT_CALL(filter_callbacks_.route_->route_entry_.virtual_host_.rate_limit_policy_, diff --git a/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc b/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc index 265fc0e05f2f5..90bbeb81006d4 100644 --- a/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc +++ b/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc @@ -71,6 +71,21 @@ name: rbac - any: true )EOF"; +const std::string RBAC_CONFIG_DENY_WITH_PATH_EXACT_MATCH = R"EOF( +name: rbac +typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC + rules: + action: DENY + policies: + foo: + permissions: + - url_path: + path: { exact: "/deny" } + principals: + - any: true +)EOF"; + const std::string RBAC_CONFIG_WITH_PATH_IGNORE_CASE_MATCH = R"EOF( name: rbac typed_config: @@ -317,8 +332,10 @@ TEST_P(RBACIntegrationTest, RouteOverride) { EXPECT_EQ("200", response->headers().getStatusValue()); } -TEST_P(RBACIntegrationTest, PathWithQueryAndFragment) { +TEST_P(RBACIntegrationTest, PathWithQueryAndFragmentWithOverride) { config_helper_.addFilter(RBAC_CONFIG_WITH_PATH_EXACT_MATCH); + config_helper_.addRuntimeOverride("envoy.reloadable_features.http_reject_path_with_fragment", + "false"); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -344,6 +361,62 @@ TEST_P(RBACIntegrationTest, PathWithQueryAndFragment) { } } +TEST_P(RBACIntegrationTest, PathWithFragmentRejectedByDefault) { + config_helper_.addFilter(RBAC_CONFIG_WITH_PATH_EXACT_MATCH); + initialize(); + + codec_client_ = 
makeHttpConnection(lookupPort("http")); + + auto response = codec_client_->makeRequestWithBody( + Http::TestRequestHeaderMapImpl{ + {":method", "POST"}, + {":path", "/allow?p1=v1#seg"}, + {":scheme", "http"}, + {":authority", "host"}, + {"x-forwarded-for", "10.0.0.1"}, + }, + 1024); + // Request should not hit the upstream + ASSERT_TRUE(response->waitForEndStream()); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("400", response->headers().getStatusValue()); +} + +// This test ensures that the exact match deny rule is not affected by fragment and query +// when Envoy is configured to strip both fragment and query. +TEST_P(RBACIntegrationTest, DenyExactMatchIgnoresQueryAndFragment) { + config_helper_.addFilter(RBAC_CONFIG_DENY_WITH_PATH_EXACT_MATCH); + config_helper_.addRuntimeOverride("envoy.reloadable_features.http_reject_path_with_fragment", + "false"); + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + const std::vector paths{"/deny#", "/deny#fragment", "/deny?p1=v1&p2=v2", + "/deny?p1=v1#seg"}; + + for (const auto& path : paths) { + printf("Testing: %s\n", path.c_str()); + auto response = codec_client_->makeRequestWithBody( + Http::TestRequestHeaderMapImpl{ + {":method", "POST"}, + {":path", path}, + {":scheme", "http"}, + {":authority", "host"}, + {"x-forwarded-for", "10.0.0.1"}, + }, + 1024); + + ASSERT_TRUE(response->waitForEndStream()); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("403", response->headers().getStatusValue()); + if (downstreamProtocol() == Http::CodecClient::Type::HTTP1) { + ASSERT_TRUE(codec_client_->waitForDisconnect()); + codec_client_ = makeHttpConnection(lookupPort("http")); + } + } +} + TEST_P(RBACIntegrationTest, PathIgnoreCase) { config_helper_.addFilter(RBAC_CONFIG_WITH_PATH_IGNORE_CASE_MATCH); initialize(); diff --git a/test/extensions/filters/http/rbac/rbac_filter_test.cc b/test/extensions/filters/http/rbac/rbac_filter_test.cc index e7a8ed8e14dd8..4d50a70421da0 100644 --- 
a/test/extensions/filters/http/rbac/rbac_filter_test.cc +++ b/test/extensions/filters/http/rbac/rbac_filter_test.cc @@ -63,7 +63,7 @@ class RoleBasedAccessControlFilterTest : public testing::Test { void setDestinationPort(uint16_t port) { address_ = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", port, false); - req_info_.downstream_address_provider_->setLocalAddress(address_); + req_info_.downstream_connection_info_provider_->setLocalAddress(address_); } void setRequestedServerName(std::string server_name) { @@ -212,7 +212,7 @@ TEST_F(RoleBasedAccessControlFilterTest, RouteLocalOverride) { EXPECT_CALL(engine, handleAction(_, _, _, _)).WillRepeatedly(Return(true)); EXPECT_CALL(per_route_config_, engine()).WillRepeatedly(ReturnRef(engine)); - EXPECT_CALL(callbacks_.route_->route_entry_, perFilterConfig("envoy.filters.http.rbac")) + EXPECT_CALL(*callbacks_.route_, mostSpecificPerFilterConfig("envoy.filters.http.rbac")) .WillRepeatedly(Return(&per_route_config_)); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_.decodeHeaders(headers_, true)); diff --git a/test/extensions/filters/http/router/config_test.cc b/test/extensions/filters/http/router/config_test.cc index af752c3192d4f..65d3f85544e72 100644 --- a/test/extensions/filters/http/router/config_test.cc +++ b/test/extensions/filters/http/router/config_test.cc @@ -27,7 +27,7 @@ TEST(RouterFilterConfigTest, SimpleRouterFilterConfig) { )EOF"; envoy::extensions::filters::http::router::v3::Router proto_config; - TestUtility::loadFromYaml(yaml_string, proto_config, false, true); + TestUtility::loadFromYaml(yaml_string, proto_config); NiceMock context; RouterFilterConfig factory; Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, "stats.", context); @@ -43,8 +43,8 @@ TEST(RouterFilterConfigTest, BadRouterFilterConfig) { )EOF"; envoy::extensions::filters::http::router::v3::Router proto_config; - EXPECT_THROW_WITH_REGEX(TestUtility::loadFromYaml(yaml_string, proto_config, false, 
true), - EnvoyException, "route: Cannot find field"); + EXPECT_THROW_WITH_REGEX(TestUtility::loadFromYaml(yaml_string, proto_config), EnvoyException, + "route: Cannot find field"); } TEST(RouterFilterConfigTest, RouterFilterWithUnsupportedStrictHeaderCheck) { @@ -54,7 +54,7 @@ TEST(RouterFilterConfigTest, RouterFilterWithUnsupportedStrictHeaderCheck) { )EOF"; envoy::extensions::filters::http::router::v3::Router router_config; - TestUtility::loadFromYaml(yaml, router_config, false, true); + TestUtility::loadFromYaml(yaml, router_config); NiceMock context; RouterFilterConfig factory; diff --git a/test/extensions/filters/http/wasm/wasm_filter_test.cc b/test/extensions/filters/http/wasm/wasm_filter_test.cc index d14d2601c0782..773886c5c9f6b 100644 --- a/test/extensions/filters/http/wasm/wasm_filter_test.cc +++ b/test/extensions/filters/http/wasm/wasm_filter_test.cc @@ -1124,12 +1124,20 @@ TEST_P(WasmHttpFilterTest, GrpcCallFailure) { filter().decodeHeaders(request_headers, false)); // Test some additional error paths. - EXPECT_EQ(filter().grpcSend(99999, "", false), proxy_wasm::WasmResult::BadArgument); - EXPECT_EQ(filter().grpcSend(10000, "", false), proxy_wasm::WasmResult::NotFound); - EXPECT_EQ(filter().grpcCancel(9999), proxy_wasm::WasmResult::NotFound); - EXPECT_EQ(filter().grpcCancel(10000), proxy_wasm::WasmResult::NotFound); - EXPECT_EQ(filter().grpcClose(9999), proxy_wasm::WasmResult::NotFound); - EXPECT_EQ(filter().grpcClose(10000), proxy_wasm::WasmResult::NotFound); + // 0xFF00 (HTTP call). + EXPECT_EQ(filter().grpcSend(0xFF00, "", false), proxy_wasm::WasmResult::BadArgument); + EXPECT_EQ(filter().grpcCancel(0xFF00), proxy_wasm::WasmResult::BadArgument); + EXPECT_EQ(filter().grpcClose(0xFF00), proxy_wasm::WasmResult::BadArgument); + + // 0xFF01 (gRPC call). 
+ EXPECT_EQ(filter().grpcSend(0xFF01, "", false), proxy_wasm::WasmResult::BadArgument); + EXPECT_EQ(filter().grpcCancel(0xFF01), proxy_wasm::WasmResult::NotFound); + EXPECT_EQ(filter().grpcClose(0xFF01), proxy_wasm::WasmResult::NotFound); + + // 0xFF02 (gRPC stream). + EXPECT_EQ(filter().grpcSend(0xFF02, "", false), proxy_wasm::WasmResult::NotFound); + EXPECT_EQ(filter().grpcCancel(0xFF02), proxy_wasm::WasmResult::NotFound); + EXPECT_EQ(filter().grpcClose(0xFF02), proxy_wasm::WasmResult::NotFound); ProtobufWkt::Value value; value.set_string_value("response"); @@ -1765,12 +1773,12 @@ TEST_P(WasmHttpFilterTest, Property) { EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, true)); StreamInfo::MockStreamInfo log_stream_info; request_stream_info_.route_name_ = "route12"; - request_stream_info_.downstream_address_provider_->setRequestedServerName("w3.org"); + request_stream_info_.downstream_connection_info_provider_->setRequestedServerName("w3.org"); NiceMock connection; EXPECT_CALL(connection, id()).WillRepeatedly(Return(4)); EXPECT_CALL(encoder_callbacks_, connection()).WillRepeatedly(Return(&connection)); - NiceMock route_entry; - EXPECT_CALL(request_stream_info_, routeEntry()).WillRepeatedly(Return(&route_entry)); + std::shared_ptr route{new NiceMock()}; + EXPECT_CALL(request_stream_info_, route()).WillRepeatedly(Return(route)); std::shared_ptr> host_description( new NiceMock()); auto metadata = std::make_shared( diff --git a/test/extensions/filters/listener/common/fuzz/listener_filter_fakes.cc b/test/extensions/filters/listener/common/fuzz/listener_filter_fakes.cc index 2dede890dd85b..766bc5898c0c5 100644 --- a/test/extensions/filters/listener/common/fuzz/listener_filter_fakes.cc +++ b/test/extensions/filters/listener/common/fuzz/listener_filter_fakes.cc @@ -9,15 +9,16 @@ Network::IoHandle& FakeConnectionSocket::ioHandle() { return *io_handle_; } const Network::IoHandle& FakeConnectionSocket::ioHandle() const { return 
*io_handle_; } Network::Address::Type FakeConnectionSocket::addressType() const { - return address_provider_->localAddress()->type(); + return connection_info_provider_->localAddress()->type(); } absl::optional FakeConnectionSocket::ipVersion() const { - if (address_provider_->localAddress() == nullptr || addressType() != Network::Address::Type::Ip) { + if (connection_info_provider_->localAddress() == nullptr || + addressType() != Network::Address::Type::Ip) { return absl::nullopt; } - return address_provider_->localAddress()->ip()->version(); + return connection_info_provider_->localAddress()->ip()->version(); } void FakeConnectionSocket::setDetectedTransportProtocol(absl::string_view protocol) { diff --git a/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.cc b/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.cc index 62fc696512081..280cfb4e5529b 100644 --- a/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.cc +++ b/test/extensions/filters/listener/common/fuzz/listener_filter_fuzzer.cc @@ -8,16 +8,18 @@ void ListenerFilterFuzzer::fuzz( Network::ListenerFilterPtr filter, const test::extensions::filters::listener::FilterFuzzTestCase& input) { try { - socket_.addressProvider().setLocalAddress( + socket_.connectionInfoProvider().setLocalAddress( Network::Utility::resolveUrl(input.sock().local_address())); } catch (const EnvoyException& e) { - socket_.addressProvider().setLocalAddress(Network::Utility::resolveUrl("tcp://0.0.0.0:0")); + socket_.connectionInfoProvider().setLocalAddress( + Network::Utility::resolveUrl("tcp://0.0.0.0:0")); } try { - socket_.addressProvider().setRemoteAddress( + socket_.connectionInfoProvider().setRemoteAddress( Network::Utility::resolveUrl(input.sock().remote_address())); } catch (const EnvoyException& e) { - socket_.addressProvider().setRemoteAddress(Network::Utility::resolveUrl("tcp://0.0.0.0:0")); + socket_.connectionInfoProvider().setRemoteAddress( + 
Network::Utility::resolveUrl("tcp://0.0.0.0:0")); } FuzzedInputStream data(input); diff --git a/test/extensions/filters/listener/original_src/original_src_test.cc b/test/extensions/filters/listener/original_src/original_src_test.cc index ede0069b8876e..45c4d2cb0d5fb 100644 --- a/test/extensions/filters/listener/original_src/original_src_test.cc +++ b/test/extensions/filters/listener/original_src/original_src_test.cc @@ -38,7 +38,8 @@ class OriginalSrcTest : public testing::Test { } void setAddressToReturn(const std::string& address) { - callbacks_.socket_.address_provider_->setRemoteAddress(Network::Utility::resolveUrl(address)); + callbacks_.socket_.connection_info_provider_->setRemoteAddress( + Network::Utility::resolveUrl(address)); } protected: @@ -81,8 +82,8 @@ TEST_F(OriginalSrcTest, OnNewConnectionIpv4AddressAddsOption) { NiceMock socket; options->at(0)->setOption(socket, envoy::config::core::v3::SocketOption::STATE_PREBIND); - EXPECT_EQ(*socket.addressProvider().localAddress(), - *callbacks_.socket_.addressProvider().remoteAddress()); + EXPECT_EQ(*socket.connectionInfoProvider().localAddress(), + *callbacks_.socket_.connectionInfoProvider().remoteAddress()); } TEST_F(OriginalSrcTest, OnNewConnectionIpv4AddressUsesCorrectAddress) { @@ -115,7 +116,7 @@ TEST_F(OriginalSrcTest, OnNewConnectionIpv4AddressBleachesPort) { // not ideal -- we're assuming that the original_src option is first, but it's a fair assumption // for now. 
options->at(0)->setOption(socket, envoy::config::core::v3::SocketOption::STATE_PREBIND); - EXPECT_EQ(*socket.addressProvider().localAddress(), *expected_address); + EXPECT_EQ(*socket.connectionInfoProvider().localAddress(), *expected_address); } TEST_F(OriginalSrcTest, FilterAddsTransparentOption) { diff --git a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc index 5de9c4c9e4e7e..9b10643009dd6 100644 --- a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc +++ b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc @@ -62,10 +62,10 @@ class ProxyProtocolTest : public testing::TestWithParamaddressProvider().localAddress())); + .WillOnce(ReturnRef(socket_->connectionInfoProvider().localAddress())); EXPECT_CALL(socket_factory_, getListenSocket(_)).WillOnce(Return(socket_)); connection_handler_->addListener(absl::nullopt, *this); - conn_ = dispatcher_->createClientConnection(socket_->addressProvider().localAddress(), + conn_ = dispatcher_->createClientConnection(socket_->connectionInfoProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), Network::Test::createRawBufferSocket(), nullptr); conn_->addConnectionCallbacks(connection_callbacks_); @@ -215,9 +215,9 @@ TEST_P(ProxyProtocolTest, V1Basic) { expectData("more data"); - EXPECT_EQ(server_connection_->addressProvider().remoteAddress()->ip()->addressAsString(), + EXPECT_EQ(server_connection_->connectionInfoProvider().remoteAddress()->ip()->addressAsString(), "1.2.3.4"); - EXPECT_TRUE(server_connection_->addressProvider().localAddressRestored()); + EXPECT_TRUE(server_connection_->connectionInfoProvider().localAddressRestored()); disconnect(); } @@ -229,13 +229,13 @@ TEST_P(ProxyProtocolTest, V1Minimal) { expectData("more data"); if (GetParam() == Envoy::Network::Address::IpVersion::v4) { - 
EXPECT_EQ(server_connection_->addressProvider().remoteAddress()->ip()->addressAsString(), + EXPECT_EQ(server_connection_->connectionInfoProvider().remoteAddress()->ip()->addressAsString(), "127.0.0.1"); } else { - EXPECT_EQ(server_connection_->addressProvider().remoteAddress()->ip()->addressAsString(), + EXPECT_EQ(server_connection_->connectionInfoProvider().remoteAddress()->ip()->addressAsString(), "::1"); } - EXPECT_FALSE(server_connection_->addressProvider().localAddressRestored()); + EXPECT_FALSE(server_connection_->connectionInfoProvider().localAddressRestored()); disconnect(); } @@ -251,9 +251,9 @@ TEST_P(ProxyProtocolTest, V2Basic) { expectData("more data"); - EXPECT_EQ(server_connection_->addressProvider().remoteAddress()->ip()->addressAsString(), + EXPECT_EQ(server_connection_->connectionInfoProvider().remoteAddress()->ip()->addressAsString(), "1.2.3.4"); - EXPECT_TRUE(server_connection_->addressProvider().localAddressRestored()); + EXPECT_TRUE(server_connection_->connectionInfoProvider().localAddressRestored()); disconnect(); } @@ -264,9 +264,9 @@ TEST_P(ProxyProtocolTest, BasicV6) { expectData("more data"); - EXPECT_EQ(server_connection_->addressProvider().remoteAddress()->ip()->addressAsString(), + EXPECT_EQ(server_connection_->connectionInfoProvider().remoteAddress()->ip()->addressAsString(), "1:2:3::4"); - EXPECT_TRUE(server_connection_->addressProvider().localAddressRestored()); + EXPECT_TRUE(server_connection_->connectionInfoProvider().localAddressRestored()); disconnect(); } @@ -284,9 +284,9 @@ TEST_P(ProxyProtocolTest, V2BasicV6) { expectData("more data"); - EXPECT_EQ(server_connection_->addressProvider().remoteAddress()->ip()->addressAsString(), + EXPECT_EQ(server_connection_->connectionInfoProvider().remoteAddress()->ip()->addressAsString(), "1:2:3::4"); - EXPECT_TRUE(server_connection_->addressProvider().localAddressRestored()); + EXPECT_TRUE(server_connection_->connectionInfoProvider().localAddressRestored()); disconnect(); } @@ -444,16 
+444,16 @@ TEST_P(ProxyProtocolTest, V2LocalConnection) { connect(); write(buffer, sizeof(buffer)); expectData("more data"); - if (server_connection_->addressProvider().remoteAddress()->ip()->version() == + if (server_connection_->connectionInfoProvider().remoteAddress()->ip()->version() == Envoy::Network::Address::IpVersion::v6) { - EXPECT_EQ(server_connection_->addressProvider().remoteAddress()->ip()->addressAsString(), + EXPECT_EQ(server_connection_->connectionInfoProvider().remoteAddress()->ip()->addressAsString(), "::1"); - } else if (server_connection_->addressProvider().remoteAddress()->ip()->version() == + } else if (server_connection_->connectionInfoProvider().remoteAddress()->ip()->version() == Envoy::Network::Address::IpVersion::v4) { - EXPECT_EQ(server_connection_->addressProvider().remoteAddress()->ip()->addressAsString(), + EXPECT_EQ(server_connection_->connectionInfoProvider().remoteAddress()->ip()->addressAsString(), "127.0.0.1"); } - EXPECT_FALSE(server_connection_->addressProvider().localAddressRestored()); + EXPECT_FALSE(server_connection_->connectionInfoProvider().localAddressRestored()); disconnect(); } @@ -465,16 +465,16 @@ TEST_P(ProxyProtocolTest, V2LocalConnectionExtension) { connect(); write(buffer, sizeof(buffer)); expectData("more data"); - if (server_connection_->addressProvider().remoteAddress()->ip()->version() == + if (server_connection_->connectionInfoProvider().remoteAddress()->ip()->version() == Envoy::Network::Address::IpVersion::v6) { - EXPECT_EQ(server_connection_->addressProvider().remoteAddress()->ip()->addressAsString(), + EXPECT_EQ(server_connection_->connectionInfoProvider().remoteAddress()->ip()->addressAsString(), "::1"); - } else if (server_connection_->addressProvider().remoteAddress()->ip()->version() == + } else if (server_connection_->connectionInfoProvider().remoteAddress()->ip()->version() == Envoy::Network::Address::IpVersion::v4) { - 
EXPECT_EQ(server_connection_->addressProvider().remoteAddress()->ip()->addressAsString(), + EXPECT_EQ(server_connection_->connectionInfoProvider().remoteAddress()->ip()->addressAsString(), "127.0.0.1"); } - EXPECT_FALSE(server_connection_->addressProvider().localAddressRestored()); + EXPECT_FALSE(server_connection_->connectionInfoProvider().localAddressRestored()); disconnect(); } @@ -695,9 +695,9 @@ TEST_P(ProxyProtocolTest, Fragmented) { // the results. Since we must have data we might as well check that we get it. expectData("..."); - EXPECT_EQ(server_connection_->addressProvider().remoteAddress()->ip()->addressAsString(), + EXPECT_EQ(server_connection_->connectionInfoProvider().remoteAddress()->ip()->addressAsString(), "254.254.254.254"); - EXPECT_TRUE(server_connection_->addressProvider().localAddressRestored()); + EXPECT_TRUE(server_connection_->connectionInfoProvider().localAddressRestored()); disconnect(); } @@ -717,9 +717,9 @@ TEST_P(ProxyProtocolTest, V2Fragmented1) { write(buffer + 20, 17); expectData("more data"); - EXPECT_EQ(server_connection_->addressProvider().remoteAddress()->ip()->addressAsString(), + EXPECT_EQ(server_connection_->connectionInfoProvider().remoteAddress()->ip()->addressAsString(), "1.2.3.4"); - EXPECT_TRUE(server_connection_->addressProvider().localAddressRestored()); + EXPECT_TRUE(server_connection_->connectionInfoProvider().localAddressRestored()); disconnect(); } @@ -739,9 +739,9 @@ TEST_P(ProxyProtocolTest, V2Fragmented2) { expectData("more data"); - EXPECT_EQ(server_connection_->addressProvider().remoteAddress()->ip()->addressAsString(), + EXPECT_EQ(server_connection_->connectionInfoProvider().remoteAddress()->ip()->addressAsString(), "1.2.3.4"); - EXPECT_TRUE(server_connection_->addressProvider().localAddressRestored()); + EXPECT_TRUE(server_connection_->connectionInfoProvider().localAddressRestored()); disconnect(); } @@ -896,9 +896,9 @@ TEST_P(ProxyProtocolTest, PartialRead) { expectData("..."); - 
EXPECT_EQ(server_connection_->addressProvider().remoteAddress()->ip()->addressAsString(), + EXPECT_EQ(server_connection_->connectionInfoProvider().remoteAddress()->ip()->addressAsString(), "254.254.254.254"); - EXPECT_TRUE(server_connection_->addressProvider().localAddressRestored()); + EXPECT_TRUE(server_connection_->connectionInfoProvider().localAddressRestored()); disconnect(); } @@ -921,9 +921,9 @@ TEST_P(ProxyProtocolTest, V2PartialRead) { expectData("moredata"); - EXPECT_EQ(server_connection_->addressProvider().remoteAddress()->ip()->addressAsString(), + EXPECT_EQ(server_connection_->connectionInfoProvider().remoteAddress()->ip()->addressAsString(), "1.2.3.4"); - EXPECT_TRUE(server_connection_->addressProvider().localAddressRestored()); + EXPECT_TRUE(server_connection_->connectionInfoProvider().localAddressRestored()); disconnect(); } @@ -1290,13 +1290,13 @@ class WildcardProxyProtocolTest : public testing::TestWithParamaddressProvider().localAddress()->ip()->port())), + socket_->connectionInfoProvider().localAddress()->ip()->port())), connection_handler_(new Server::ConnectionHandlerImpl(*dispatcher_, absl::nullopt)), name_("proxy"), filter_chain_(Network::Test::createEmptyFilterChainWithRawBufferSockets()), init_manager_(nullptr) { EXPECT_CALL(socket_factory_, socketType()).WillOnce(Return(Network::Socket::Type::Stream)); EXPECT_CALL(socket_factory_, localAddress()) - .WillOnce(ReturnRef(socket_->addressProvider().localAddress())); + .WillOnce(ReturnRef(socket_->connectionInfoProvider().localAddress())); EXPECT_CALL(socket_factory_, getListenSocket(_)).WillOnce(Return(socket_)); connection_handler_->addListener(absl::nullopt, *this); conn_ = dispatcher_->createClientConnection(local_dst_address_, @@ -1421,10 +1421,11 @@ TEST_P(WildcardProxyProtocolTest, Basic) { expectData("more data"); - EXPECT_EQ(server_connection_->addressProvider().remoteAddress()->asString(), "1.2.3.4:65535"); - 
EXPECT_EQ(server_connection_->addressProvider().localAddress()->asString(), + EXPECT_EQ(server_connection_->connectionInfoProvider().remoteAddress()->asString(), + "1.2.3.4:65535"); + EXPECT_EQ(server_connection_->connectionInfoProvider().localAddress()->asString(), "254.254.254.254:1234"); - EXPECT_TRUE(server_connection_->addressProvider().localAddressRestored()); + EXPECT_TRUE(server_connection_->connectionInfoProvider().localAddressRestored()); disconnect(); } @@ -1435,9 +1436,11 @@ TEST_P(WildcardProxyProtocolTest, BasicV6) { expectData("more data"); - EXPECT_EQ(server_connection_->addressProvider().remoteAddress()->asString(), "[1:2:3::4]:65535"); - EXPECT_EQ(server_connection_->addressProvider().localAddress()->asString(), "[5:6::7:8]:1234"); - EXPECT_TRUE(server_connection_->addressProvider().localAddressRestored()); + EXPECT_EQ(server_connection_->connectionInfoProvider().remoteAddress()->asString(), + "[1:2:3::4]:65535"); + EXPECT_EQ(server_connection_->connectionInfoProvider().localAddress()->asString(), + "[5:6::7:8]:1234"); + EXPECT_TRUE(server_connection_->connectionInfoProvider().localAddressRestored()); disconnect(); } diff --git a/test/extensions/filters/network/client_ssl_auth/client_ssl_auth_test.cc b/test/extensions/filters/network/client_ssl_auth/client_ssl_auth_test.cc index bad7675a9e2c3..a301eee1665e2 100644 --- a/test/extensions/filters/network/client_ssl_auth/client_ssl_auth_test.cc +++ b/test/extensions/filters/network/client_ssl_auth/client_ssl_auth_test.cc @@ -159,7 +159,7 @@ TEST_F(ClientSslAuthFilterTest, Ssl) { // Create a new filter for an SSL connection, with no backing auth data yet. 
createAuthFilter(); ON_CALL(filter_callbacks_.connection_, ssl()).WillByDefault(Return(ssl_)); - filter_callbacks_.connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + filter_callbacks_.connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("192.168.1.1")); std::string expected_sha_1("digest"); EXPECT_CALL(*ssl_, sha256PeerCertificateDigest()).WillOnce(ReturnRef(expected_sha_1)); @@ -182,7 +182,7 @@ TEST_F(ClientSslAuthFilterTest, Ssl) { // Create a new filter for an SSL connection with an authorized cert. createAuthFilter(); - filter_callbacks_.connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + filter_callbacks_.connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("192.168.1.1")); std::string expected_sha_2("1b7d42ef0025ad89c1c911d6c10d7e86a4cb7c5863b2980abcbad1895f8b5314"); EXPECT_CALL(*ssl_, sha256PeerCertificateDigest()).WillOnce(ReturnRef(expected_sha_2)); @@ -195,7 +195,7 @@ TEST_F(ClientSslAuthFilterTest, Ssl) { // White list case. createAuthFilter(); - filter_callbacks_.connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + filter_callbacks_.connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("1.2.3.4")); EXPECT_EQ(Network::FilterStatus::StopIteration, instance_->onNewConnection()); EXPECT_CALL(filter_callbacks_, continueReading()); @@ -206,7 +206,7 @@ TEST_F(ClientSslAuthFilterTest, Ssl) { // IPv6 White list case. 
createAuthFilter(); - filter_callbacks_.connection_.stream_info_.downstream_address_provider_->setRemoteAddress( + filter_callbacks_.connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( std::make_shared("2001:abcd::1")); EXPECT_EQ(Network::FilterStatus::StopIteration, instance_->onNewConnection()); EXPECT_CALL(filter_callbacks_, continueReading()); diff --git a/test/extensions/filters/network/common/fuzz/BUILD b/test/extensions/filters/network/common/fuzz/BUILD index c27cea334c5a2..1ca89bf59f76f 100644 --- a/test/extensions/filters/network/common/fuzz/BUILD +++ b/test/extensions/filters/network/common/fuzz/BUILD @@ -102,9 +102,7 @@ envoy_cc_fuzz_test( deps = [ ":uber_writefilter_lib", "//source/common/config:utility_lib", - "//source/extensions/filters/network/kafka:kafka_broker_config_lib", "//source/extensions/filters/network/mongo_proxy:config", - "//source/extensions/filters/network/mysql_proxy:config", "//source/extensions/filters/network/zookeeper_proxy:config", "//test/config:utility_lib", ], diff --git a/test/extensions/filters/network/common/fuzz/uber_per_readfilter.cc b/test/extensions/filters/network/common/fuzz/uber_per_readfilter.cc index 7c70fa1bd44e4..db7af7eb32ea5 100644 --- a/test/extensions/filters/network/common/fuzz/uber_per_readfilter.cc +++ b/test/extensions/filters/network/common/fuzz/uber_per_readfilter.cc @@ -77,16 +77,16 @@ void UberFilterFuzzer::perFilterSetup(const std::string& filter_name) { .WillByDefault(Invoke([&](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool, Grpc::CacheOption) { return async_client_; })); - read_filter_callbacks_->connection_.stream_info_.downstream_address_provider_->setLocalAddress( - pipe_addr_); - read_filter_callbacks_->connection_.stream_info_.downstream_address_provider_->setRemoteAddress( - pipe_addr_); + read_filter_callbacks_->connection_.stream_info_.downstream_connection_info_provider_ + ->setLocalAddress(pipe_addr_); + 
read_filter_callbacks_->connection_.stream_info_.downstream_connection_info_provider_ + ->setRemoteAddress(pipe_addr_); } else if (filter_name == NetworkFilterNames::get().HttpConnectionManager || filter_name == NetworkFilterNames::get().EnvoyMobileHttpConnectionManager) { - read_filter_callbacks_->connection_.stream_info_.downstream_address_provider_->setLocalAddress( - pipe_addr_); - read_filter_callbacks_->connection_.stream_info_.downstream_address_provider_->setRemoteAddress( - pipe_addr_); + read_filter_callbacks_->connection_.stream_info_.downstream_connection_info_provider_ + ->setLocalAddress(pipe_addr_); + read_filter_callbacks_->connection_.stream_info_.downstream_connection_info_provider_ + ->setRemoteAddress(pipe_addr_); } else if (filter_name == NetworkFilterNames::get().RateLimit) { async_client_factory_ = std::make_unique(); async_client_ = std::make_unique(); @@ -106,10 +106,10 @@ void UberFilterFuzzer::perFilterSetup(const std::string& filter_name) { getOrCreateRawAsyncClient(_, _, _, _)) .WillByDefault(Invoke([&](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool, Grpc::CacheOption) { return async_client_; })); - read_filter_callbacks_->connection_.stream_info_.downstream_address_provider_->setLocalAddress( - pipe_addr_); - read_filter_callbacks_->connection_.stream_info_.downstream_address_provider_->setRemoteAddress( - pipe_addr_); + read_filter_callbacks_->connection_.stream_info_.downstream_connection_info_provider_ + ->setLocalAddress(pipe_addr_); + read_filter_callbacks_->connection_.stream_info_.downstream_connection_info_provider_ + ->setRemoteAddress(pipe_addr_); } } diff --git a/test/extensions/filters/network/common/fuzz/uber_readfilter.cc b/test/extensions/filters/network/common/fuzz/uber_readfilter.cc index 2179484abcda6..ca021727c2d17 100644 --- a/test/extensions/filters/network/common/fuzz/uber_readfilter.cc +++ b/test/extensions/filters/network/common/fuzz/uber_readfilter.cc @@ -1,7 +1,6 @@ #include 
"test/extensions/filters/network/common/fuzz/uber_readfilter.h" #include "source/common/config/utility.h" -#include "source/common/config/version_converter.h" #include "source/common/network/address_impl.h" using testing::Return; diff --git a/test/extensions/filters/network/common/fuzz/uber_writefilter.cc b/test/extensions/filters/network/common/fuzz/uber_writefilter.cc index da4979656e822..6772fe995e22c 100644 --- a/test/extensions/filters/network/common/fuzz/uber_writefilter.cc +++ b/test/extensions/filters/network/common/fuzz/uber_writefilter.cc @@ -1,7 +1,6 @@ #include "test/extensions/filters/network/common/fuzz/uber_writefilter.h" #include "source/common/config/utility.h" -#include "source/common/config/version_converter.h" using testing::_; using testing::Return; diff --git a/test/extensions/filters/network/dubbo_proxy/config_test.cc b/test/extensions/filters/network/dubbo_proxy/config_test.cc index e10262aa646bb..c525e3638c3ab 100644 --- a/test/extensions/filters/network/dubbo_proxy/config_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/config_test.cc @@ -22,9 +22,9 @@ using DubboProxyProto = envoy::extensions::filters::network::dubbo_proxy::v3::Du namespace { -DubboProxyProto parseDubboProxyFromV3Yaml(const std::string& yaml, bool avoid_boosting = true) { +DubboProxyProto parseDubboProxyFromV3Yaml(const std::string& yaml) { DubboProxyProto dubbo_proxy; - TestUtility::loadFromYaml(yaml, dubbo_proxy, false, avoid_boosting); + TestUtility::loadFromYaml(yaml, dubbo_proxy); return dubbo_proxy; } diff --git a/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc b/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc index 1c0e9b10e3723..56c60e93bb3f9 100644 --- a/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc @@ -1023,26 +1023,6 @@ TEST_F(ConnectionManagerTest, SendsLocalReplyWithCloseConnection) { conn_manager_->sendLocalReply(metadata, 
direct_response, true); } -TEST_F(ConnectionManagerTest, ContinueDecodingWithHalfClose) { - initializeFilter(); - writeHessianRequestMessage(buffer_, true, false, 0x0F); - - config_->setupFilterChain(1, 0); - config_->expectOnDestroy(); - auto& decoder_filter = config_->decoder_filters_[0]; - - EXPECT_CALL(*decoder_filter, onMessageDecoded(_, _)) - .WillOnce(Invoke([&](MessageMetadataSharedPtr, ContextSharedPtr) -> FilterStatus { - return FilterStatus::StopIteration; - })); - EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::FlushWrite)); - EXPECT_CALL(filter_callbacks_.connection_.dispatcher_, deferredDelete_(_)); - EXPECT_EQ(conn_manager_->onData(buffer_, true), Network::FilterStatus::StopIteration); - EXPECT_EQ(1U, store_.counter("test.cx_destroy_remote_with_active_rq").value()); - - conn_manager_->continueDecoding(); -} - TEST_F(ConnectionManagerTest, RoutingSuccess) { initializeFilter(); writeHessianRequestMessage(buffer_, false, false, 0x0F); diff --git a/test/extensions/filters/network/ext_authz/config_test.cc b/test/extensions/filters/network/ext_authz/config_test.cc index 470026c43da1b..2d26fc050621c 100644 --- a/test/extensions/filters/network/ext_authz/config_test.cc +++ b/test/extensions/filters/network/ext_authz/config_test.cc @@ -21,11 +21,7 @@ namespace NetworkFilters { namespace ExtAuthz { namespace { -void expectCorrectProto(envoy::config::core::v3::ApiVersion api_version) { - std::unique_ptr _deprecated_v2_api; - if (api_version != envoy::config::core::v3::ApiVersion::V3) { - _deprecated_v2_api = std::make_unique(); - } +void expectCorrectProto() { std::string yaml = R"EOF( grpc_service: google_grpc: @@ -33,13 +29,12 @@ void expectCorrectProto(envoy::config::core::v3::ApiVersion api_version) { stat_prefix: google failure_mode_allow: false stat_prefix: name - transport_api_version: {} + transport_api_version: V3 )EOF"; ExtAuthzConfigFactory factory; ProtobufTypes::MessagePtr proto_config = 
factory.createEmptyConfigProto(); - TestUtility::loadFromYaml( - fmt::format(yaml, TestUtility::getVersionStringFromApiVersion(api_version)), *proto_config); + TestUtility::loadFromYaml(yaml, *proto_config); NiceMock context; testing::StrictMock server_context; @@ -67,13 +62,7 @@ TEST(ExtAuthzFilterConfigTest, ValidateFail) { ProtoValidationException); } -TEST(ExtAuthzFilterConfigTest, ExtAuthzCorrectProto) { -#ifndef ENVOY_DISABLE_DEPRECATED_FEATURES - expectCorrectProto(envoy::config::core::v3::ApiVersion::AUTO); - expectCorrectProto(envoy::config::core::v3::ApiVersion::V2); -#endif - expectCorrectProto(envoy::config::core::v3::ApiVersion::V3); -} +TEST(ExtAuthzFilterConfigTest, ExtAuthzCorrectProto) { expectCorrectProto(); } // Test that the deprecated extension name still functions. TEST(ExtAuthzConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { diff --git a/test/extensions/filters/network/ext_authz/ext_authz_fuzz_test.cc b/test/extensions/filters/network/ext_authz/ext_authz_fuzz_test.cc index 801fd4e450800..db2b6c0da9404 100644 --- a/test/extensions/filters/network/ext_authz/ext_authz_fuzz_test.cc +++ b/test/extensions/filters/network/ext_authz/ext_authz_fuzz_test.cc @@ -74,8 +74,10 @@ DEFINE_PROTO_FUZZER(const envoy::extensions::filters::network::ext_authz::ExtAut static Network::Address::InstanceConstSharedPtr addr = std::make_shared("/test/test.sock"); - filter_callbacks.connection_.stream_info_.downstream_address_provider_->setRemoteAddress(addr); - filter_callbacks.connection_.stream_info_.downstream_address_provider_->setLocalAddress(addr); + filter_callbacks.connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( + addr); + filter_callbacks.connection_.stream_info_.downstream_connection_info_provider_->setLocalAddress( + addr); for (const auto& action : input.actions()) { switch (action.action_selector_case()) { diff --git a/test/extensions/filters/network/ext_authz/ext_authz_test.cc 
b/test/extensions/filters/network/ext_authz/ext_authz_test.cc index d123c9b6171fe..0f2190a17d974 100644 --- a/test/extensions/filters/network/ext_authz/ext_authz_test.cc +++ b/test/extensions/filters/network/ext_authz/ext_authz_test.cc @@ -65,9 +65,10 @@ class ExtAuthzFilterTest : public testing::Test { } void expectOKWithOnData() { - filter_callbacks_.connection_.stream_info_.downstream_address_provider_->setRemoteAddress( - addr_); - filter_callbacks_.connection_.stream_info_.downstream_address_provider_->setLocalAddress(addr_); + filter_callbacks_.connection_.stream_info_.downstream_connection_info_provider_ + ->setRemoteAddress(addr_); + filter_callbacks_.connection_.stream_info_.downstream_connection_info_provider_ + ->setLocalAddress(addr_); EXPECT_CALL(*client_, check(_, _, testing::A(), _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { @@ -180,8 +181,10 @@ TEST_F(ExtAuthzFilterTest, DeniedWithOnData) { initialize(default_yaml_string_); InSequence s; - filter_callbacks_.connection_.stream_info_.downstream_address_provider_->setRemoteAddress(addr_); - filter_callbacks_.connection_.stream_info_.downstream_address_provider_->setLocalAddress(addr_); + filter_callbacks_.connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( + addr_); + filter_callbacks_.connection_.stream_info_.downstream_connection_info_provider_->setLocalAddress( + addr_); EXPECT_CALL(*client_, check(_, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { @@ -221,8 +224,10 @@ TEST_F(ExtAuthzFilterTest, FailOpen) { initialize(default_yaml_string_); InSequence s; - filter_callbacks_.connection_.stream_info_.downstream_address_provider_->setRemoteAddress(addr_); - filter_callbacks_.connection_.stream_info_.downstream_address_provider_->setLocalAddress(addr_); + filter_callbacks_.connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( + 
addr_); + filter_callbacks_.connection_.stream_info_.downstream_connection_info_provider_->setLocalAddress( + addr_); EXPECT_CALL(*client_, check(_, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { @@ -255,8 +260,10 @@ TEST_F(ExtAuthzFilterTest, FailClose) { // Explicitly set the failure_mode_allow to false. config_->setFailModeAllow(false); - filter_callbacks_.connection_.stream_info_.downstream_address_provider_->setRemoteAddress(addr_); - filter_callbacks_.connection_.stream_info_.downstream_address_provider_->setLocalAddress(addr_); + filter_callbacks_.connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( + addr_); + filter_callbacks_.connection_.stream_info_.downstream_connection_info_provider_->setLocalAddress( + addr_); EXPECT_CALL(*client_, check(_, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { @@ -286,8 +293,10 @@ TEST_F(ExtAuthzFilterTest, DoNotCallCancelonRemoteClose) { initialize(default_yaml_string_); InSequence s; - filter_callbacks_.connection_.stream_info_.downstream_address_provider_->setRemoteAddress(addr_); - filter_callbacks_.connection_.stream_info_.downstream_address_provider_->setLocalAddress(addr_); + filter_callbacks_.connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( + addr_); + filter_callbacks_.connection_.stream_info_.downstream_connection_info_provider_->setLocalAddress( + addr_); EXPECT_CALL(*client_, check(_, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { @@ -321,8 +330,10 @@ TEST_F(ExtAuthzFilterTest, VerifyCancelOnRemoteClose) { initialize(default_yaml_string_); InSequence s; - filter_callbacks_.connection_.stream_info_.downstream_address_provider_->setRemoteAddress(addr_); - filter_callbacks_.connection_.stream_info_.downstream_address_provider_->setLocalAddress(addr_); + 
filter_callbacks_.connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( + addr_); + filter_callbacks_.connection_.stream_info_.downstream_connection_info_provider_->setLocalAddress( + addr_); EXPECT_CALL(*client_, check(_, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { @@ -351,15 +362,28 @@ TEST_F(ExtAuthzFilterTest, ImmediateOK) { initialize(default_yaml_string_); InSequence s; - filter_callbacks_.connection_.stream_info_.downstream_address_provider_->setRemoteAddress(addr_); - filter_callbacks_.connection_.stream_info_.downstream_address_provider_->setLocalAddress(addr_); + filter_callbacks_.connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( + addr_); + filter_callbacks_.connection_.stream_info_.downstream_connection_info_provider_->setLocalAddress( + addr_); + ProtobufWkt::Struct dynamic_metadata; + (*dynamic_metadata.mutable_fields())["baz"] = ValueUtil::stringValue("hello-ok"); + (*dynamic_metadata.mutable_fields())["x"] = ValueUtil::numberValue(12); EXPECT_CALL(filter_callbacks_, continueReading()).Times(0); EXPECT_CALL(*client_, check(_, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { - callbacks.onComplete(makeAuthzResponse(Filters::Common::ExtAuthz::CheckStatus::OK)); + Filters::Common::ExtAuthz::Response response{}; + response.status = Filters::Common::ExtAuthz::CheckStatus::OK; + response.dynamic_metadata = dynamic_metadata; + callbacks.onComplete(std::make_unique(response)); }))); - EXPECT_CALL(filter_callbacks_.connection_.stream_info_, setDynamicMetadata(_, _)).Times(0); + EXPECT_CALL(filter_callbacks_.connection_.stream_info_, setDynamicMetadata(_, _)) + .WillOnce(Invoke([&dynamic_metadata](const std::string& ns, + const ProtobufWkt::Struct& returned_dynamic_metadata) { + EXPECT_EQ(ns, NetworkFilterNames::get().ExtAuthorization); + 
EXPECT_TRUE(TestUtility::protoEqual(returned_dynamic_metadata, dynamic_metadata)); + })); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection()); Buffer::OwnedImpl data("hello"); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false)); @@ -383,14 +407,28 @@ TEST_F(ExtAuthzFilterTest, ImmediateNOK) { initialize(default_yaml_string_); InSequence s; - filter_callbacks_.connection_.stream_info_.downstream_address_provider_->setRemoteAddress(addr_); - filter_callbacks_.connection_.stream_info_.downstream_address_provider_->setLocalAddress(addr_); + filter_callbacks_.connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( + addr_); + filter_callbacks_.connection_.stream_info_.downstream_connection_info_provider_->setLocalAddress( + addr_); + ProtobufWkt::Struct dynamic_metadata; + (*dynamic_metadata.mutable_fields())["baz"] = ValueUtil::stringValue("hello-nok"); + (*dynamic_metadata.mutable_fields())["x"] = ValueUtil::numberValue(15); EXPECT_CALL(filter_callbacks_, continueReading()).Times(0); EXPECT_CALL(*client_, check(_, _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { - callbacks.onComplete(makeAuthzResponse(Filters::Common::ExtAuthz::CheckStatus::Denied)); + Filters::Common::ExtAuthz::Response response{}; + response.status = Filters::Common::ExtAuthz::CheckStatus::Denied; + response.dynamic_metadata = dynamic_metadata; + callbacks.onComplete(std::make_unique(response)); }))); + EXPECT_CALL(filter_callbacks_.connection_.stream_info_, setDynamicMetadata(_, _)) + .WillOnce(Invoke([&dynamic_metadata](const std::string& ns, + const ProtobufWkt::Struct& returned_dynamic_metadata) { + EXPECT_EQ(ns, NetworkFilterNames::get().ExtAuthorization); + EXPECT_TRUE(TestUtility::protoEqual(returned_dynamic_metadata, dynamic_metadata)); + })); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection()); Buffer::OwnedImpl data("hello"); @@ -411,8 +449,10 @@ 
TEST_F(ExtAuthzFilterTest, ImmediateErrorFailOpen) { initialize(default_yaml_string_); InSequence s; - filter_callbacks_.connection_.stream_info_.downstream_address_provider_->setRemoteAddress(addr_); - filter_callbacks_.connection_.stream_info_.downstream_address_provider_->setLocalAddress(addr_); + filter_callbacks_.connection_.stream_info_.downstream_connection_info_provider_->setRemoteAddress( + addr_); + filter_callbacks_.connection_.stream_info_.downstream_connection_info_provider_->setLocalAddress( + addr_); EXPECT_CALL(filter_callbacks_, continueReading()).Times(0); EXPECT_CALL(*client_, check(_, _, _, _)) .WillOnce( diff --git a/test/extensions/filters/network/http_connection_manager/BUILD b/test/extensions/filters/network/http_connection_manager/BUILD index c4ca663865670..b5a2518583394 100644 --- a/test/extensions/filters/network/http_connection_manager/BUILD +++ b/test/extensions/filters/network/http_connection_manager/BUILD @@ -25,7 +25,7 @@ envoy_extension_cc_test_library( extension_names = ["envoy.filters.network.http_connection_manager"], deps = [ ":config_cc_proto", - "//source/common/filter/http:filter_config_discovery_lib", + "//source/common/filter:config_discovery_lib", "//source/common/network:address_lib", "//source/extensions/filters/http/common:factory_base_lib", "//source/extensions/filters/http/common:pass_through_filter_lib", diff --git a/test/extensions/filters/network/http_connection_manager/config_test.cc b/test/extensions/filters/network/http_connection_manager/config_test.cc index 80065d021eb91..1681eea7db600 100644 --- a/test/extensions/filters/network/http_connection_manager/config_test.cc +++ b/test/extensions/filters/network/http_connection_manager/config_test.cc @@ -143,10 +143,7 @@ stat_prefix: router "chain."); } -// When deprecating v2, remove the old style "operation_name: egress" config -// but retain the rest of the test. 
-TEST_F(HttpConnectionManagerConfigTest, DEPRECATED_FEATURE_TEST(MiscConfig)) { - TestDeprecatedV2Api _deprecated_v2_api; +TEST_F(HttpConnectionManagerConfigTest, MiscConfig) { const std::string yaml_string = R"EOF( codec_type: http1 server_name: foo @@ -162,14 +159,13 @@ stat_prefix: router route: cluster: cluster tracing: - operation_name: egress max_path_tag_length: 128 http_filters: - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), - context_, date_provider_, route_config_provider_manager_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, + date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_, filter_config_provider_manager_); @@ -201,10 +197,23 @@ stat_prefix: router )EOF"; #ifdef ENVOY_ENABLE_QUIC - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, - date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_, - filter_config_provider_manager_); + { + EXPECT_CALL(context_, isQuicListener()).WillOnce(Return(false)); + + EXPECT_THROW_WITH_MESSAGE( + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), + context_, date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_, + http_tracer_manager_, filter_config_provider_manager_), + EnvoyException, "HTTP/3 codec configured on non-QUIC listener."); + } + { + EXPECT_CALL(context_, isQuicListener()).WillOnce(Return(true)); + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, + date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); + } #else EXPECT_THROW_WITH_MESSAGE( HttpConnectionManagerConfig(parseHttpConnectionManagerFromYaml(yaml_string), 
context_, @@ -215,6 +224,35 @@ stat_prefix: router #endif } +TEST_F(HttpConnectionManagerConfigTest, Http3HalfConfigured) { + const std::string yaml_string = R"EOF( +codec_type: http1 +server_name: foo +stat_prefix: router +route_config: + virtual_hosts: + - name: service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: cluster +http_filters: +- name: envoy.filters.http.router + )EOF"; + + EXPECT_CALL(context_, isQuicListener()).WillOnce(Return(true)); + + EXPECT_THROW_WITH_MESSAGE( + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, + date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_, + http_tracer_manager_, filter_config_provider_manager_), + EnvoyException, "Non-HTTP/3 codec configured on QUIC listener."); +} + TEST_F(HttpConnectionManagerConfigTest, TracingNotEnabledAndNoTracingConfigInBootstrap) { const std::string yaml_string = R"EOF( codec_type: http1 @@ -385,7 +423,7 @@ stat_prefix: router typed_config: "@type": type.googleapis.com/envoy.config.trace.v3.ZipkinConfig collector_cluster: zipkin - collector_endpoint: "/api/v1/spans" + collector_endpoint: "/api/v2/spans" collector_endpoint_version: HTTP_JSON http_filters: - name: envoy.filters.http.router @@ -403,7 +441,7 @@ stat_prefix: router inlined_tracing_config.set_name("zipkin"); envoy::config::trace::v3::ZipkinConfig zipkin_config; zipkin_config.set_collector_cluster("zipkin"); - zipkin_config.set_collector_endpoint("/api/v1/spans"); + zipkin_config.set_collector_endpoint("/api/v2/spans"); zipkin_config.set_collector_endpoint_version(envoy::config::trace::v3::ZipkinConfig::HTTP_JSON); inlined_tracing_config.mutable_typed_config()->PackFrom(zipkin_config); @@ -413,8 +451,8 @@ stat_prefix: router EXPECT_CALL(http_tracer_manager_, getOrCreateHttpTracer(Pointee(ProtoEq(inlined_tracing_config)))) .WillOnce(Return(http_tracer_)); - HttpConnectionManagerConfig 
config(parseHttpConnectionManagerFromYaml(yaml_string, false), - context_, date_provider_, route_config_provider_manager_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, + date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_, filter_config_provider_manager_); @@ -457,85 +495,6 @@ stat_prefix: router } } -TEST_F(HttpConnectionManagerConfigTest, DEPRECATED_FEATURE_TEST(RequestHeaderForTagsConfig)) { - TestDeprecatedV2Api _deprecated_v2_api; - const std::string yaml_string = R"EOF( -stat_prefix: router -route_config: - name: local_route -tracing: - request_headers_for_tags: - - foo - )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), - context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_, - filter_config_provider_manager_); - - const Tracing::CustomTagMap& custom_tag_map = config.tracingConfig()->custom_tags_; - const Tracing::RequestHeaderCustomTag* foo = dynamic_cast( - custom_tag_map.find("foo")->second.get()); - EXPECT_NE(foo, nullptr); - EXPECT_EQ(foo->tag(), "foo"); -} - -TEST_F(HttpConnectionManagerConfigTest, - DEPRECATED_FEATURE_TEST(ListenerDirectionOutboundOverride)) { - TestDeprecatedV2Api _deprecated_v2_api; - const std::string yaml_string = R"EOF( -stat_prefix: router -route_config: - virtual_hosts: - - name: service - domains: - - "*" - routes: - - match: - prefix: "/" - route: - cluster: cluster -tracing: - operation_name: ingress -http_filters: -- name: envoy.filters.http.router - )EOF"; - - ON_CALL(context_, direction()).WillByDefault(Return(envoy::config::core::v3::OUTBOUND)); - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), - context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_, - filter_config_provider_manager_); - 
EXPECT_EQ(Tracing::OperationName::Egress, config.tracingConfig()->operation_name_); -} - -TEST_F(HttpConnectionManagerConfigTest, DEPRECATED_FEATURE_TEST(ListenerDirectionInboundOverride)) { - TestDeprecatedV2Api _deprecated_v2_api; - const std::string yaml_string = R"EOF( -stat_prefix: router -route_config: - virtual_hosts: - - name: service - domains: - - "*" - routes: - - match: - prefix: "/" - route: - cluster: cluster -tracing: - operation_name: egress -http_filters: -- name: envoy.filters.http.router - )EOF"; - - ON_CALL(context_, direction()).WillByDefault(Return(envoy::config::core::v3::INBOUND)); - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), - context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_, - filter_config_provider_manager_); - EXPECT_EQ(Tracing::OperationName::Ingress, config.tracingConfig()->operation_name_); -} - TEST_F(HttpConnectionManagerConfigTest, SamplingDefault) { const std::string yaml_string = R"EOF( stat_prefix: ingress_http @@ -548,8 +507,8 @@ TEST_F(HttpConnectionManagerConfigTest, SamplingDefault) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), - context_, date_provider_, route_config_provider_manager_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, + date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_, filter_config_provider_manager_); @@ -583,8 +542,8 @@ TEST_F(HttpConnectionManagerConfigTest, SamplingConfigured) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), - context_, date_provider_, route_config_provider_manager_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, + date_provider_, 
route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_, filter_config_provider_manager_); @@ -617,8 +576,8 @@ TEST_F(HttpConnectionManagerConfigTest, FractionalSamplingConfigured) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), - context_, date_provider_, route_config_provider_manager_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, + date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_, filter_config_provider_manager_); @@ -651,8 +610,8 @@ TEST_F(HttpConnectionManagerConfigTest, OverallSampling) { - name: envoy.filters.http.router )EOF"; - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), - context_, date_provider_, route_config_provider_manager_, + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, + date_provider_, route_config_provider_manager_, scoped_routes_config_provider_manager_, http_tracer_manager_, filter_config_provider_manager_); @@ -2033,6 +1992,46 @@ TEST_F(HttpConnectionManagerConfigTest, OriginalIPDetectionExtension) { EXPECT_EQ(1, original_ip_detection_extensions.size()); } +TEST_F(HttpConnectionManagerConfigTest, OriginalIPDetectionExtensionMixedWithUseRemoteAddress) { + const std::string yaml_string = R"EOF( + stat_prefix: ingress_http + route_config: + name: local_route + use_remote_address: true + original_ip_detection_extensions: + - name: envoy.http.original_ip_detection.custom_header + typed_config: + "@type": type.googleapis.com/envoy.extensions.http.original_ip_detection.custom_header.v3.CustomHeaderConfig + header_name: x-ip-header + http_filters: + - name: envoy.filters.http.router + )EOF"; + + EXPECT_THROW_WITH_REGEX( + createHttpConnectionManagerConfig(yaml_string), EnvoyException, + "Original IP detection extensions and 
use_remote_address may not be mixed"); +} + +TEST_F(HttpConnectionManagerConfigTest, OriginalIPDetectionExtensionMixedWithNumTrustedHops) { + const std::string yaml_string = R"EOF( + stat_prefix: ingress_http + route_config: + name: local_route + xff_num_trusted_hops: 1 + original_ip_detection_extensions: + - name: envoy.http.original_ip_detection.custom_header + typed_config: + "@type": type.googleapis.com/envoy.extensions.http.original_ip_detection.custom_header.v3.CustomHeaderConfig + header_name: x-ip-header + http_filters: + - name: envoy.filters.http.router + )EOF"; + + EXPECT_THROW_WITH_REGEX( + createHttpConnectionManagerConfig(yaml_string), EnvoyException, + "Original IP detection extensions and xff_num_trusted_hops may not be mixed"); +} + TEST_F(HttpConnectionManagerConfigTest, DynamicFilterWarmingNoDefault) { const std::string yaml_string = R"EOF( codec_type: http1 @@ -2448,7 +2447,7 @@ TEST_F(HttpConnectionManagerMobileConfigTest, Mobile) { envoy::extensions::filters::network::http_connection_manager::v3::EnvoyMobileHttpConnectionManager config; - TestUtility::loadFromYamlAndValidate(yaml_string, config, false, true); + TestUtility::loadFromYamlAndValidate(yaml_string, config); MobileHttpConnectionManagerFilterConfigFactory factory; Network::FilterFactoryCb create_hcm_cb = factory.createFilterFactoryFromProto(config, context_); diff --git a/test/extensions/filters/network/http_connection_manager/config_test_base.h b/test/extensions/filters/network/http_connection_manager/config_test_base.h index 1737bd1622531..995cb9842e146 100644 --- a/test/extensions/filters/network/http_connection_manager/config_test_base.h +++ b/test/extensions/filters/network/http_connection_manager/config_test_base.h @@ -1,6 +1,6 @@ #pragma once -#include "source/common/filter/http/filter_config_discovery_impl.h" +#include "source/common/filter/config_discovery_impl.h" #include "source/common/http/date_provider_impl.h" #include "source/common/network/address_impl.h" #include 
"source/extensions/filters/http/common/factory_base.h" @@ -24,10 +24,10 @@ namespace NetworkFilters { namespace HttpConnectionManager { envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager -parseHttpConnectionManagerFromYaml(const std::string& yaml, bool avoid_boosting = true) { +parseHttpConnectionManagerFromYaml(const std::string& yaml) { envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager http_connection_manager; - TestUtility::loadFromYamlAndValidate(yaml, http_connection_manager, false, avoid_boosting); + TestUtility::loadFromYamlAndValidate(yaml, http_connection_manager); return http_connection_manager; } @@ -38,7 +38,7 @@ class HttpConnectionManagerConfigTest : public testing::Test { NiceMock route_config_provider_manager_; NiceMock scoped_routes_config_provider_manager_; NiceMock http_tracer_manager_; - Filter::Http::FilterConfigProviderManagerImpl filter_config_provider_manager_; + Filter::HttpFilterConfigProviderManagerImpl filter_config_provider_manager_; std::shared_ptr> http_tracer_{ std::make_shared>()}; void createHttpConnectionManagerConfig(const std::string& yaml) { diff --git a/test/extensions/filters/network/kafka/broker/BUILD b/test/extensions/filters/network/kafka/broker/BUILD deleted file mode 100644 index 5abebe3f3485f..0000000000000 --- a/test/extensions/filters/network/kafka/broker/BUILD +++ /dev/null @@ -1,47 +0,0 @@ -load( - "//bazel:envoy_build_system.bzl", - "envoy_package", -) -load( - "//test/extensions:extensions_build_system.bzl", - "envoy_extension_cc_test", -) - -licenses(["notice"]) # Apache 2 - -envoy_package() - -envoy_extension_cc_test( - name = "config_unit_test", - srcs = ["config_unit_test.cc"], - extension_names = ["envoy.filters.network.kafka_broker"], - deps = [ - "//source/extensions/filters/network/kafka:kafka_broker_config_lib", - "//test/mocks/server:factory_context_mocks", - ], -) - -envoy_extension_cc_test( - name = "filter_unit_test", - srcs = 
["filter_unit_test.cc"], - extension_names = ["envoy.filters.network.kafka_broker"], - deps = [ - "//envoy/event:timer_interface", - "//source/extensions/filters/network/kafka:kafka_broker_filter_lib", - "//test/mocks/network:network_mocks", - "//test/mocks/stats:stats_mocks", - ], -) - -envoy_extension_cc_test( - name = "filter_protocol_test", - srcs = ["filter_protocol_test.cc"], - extension_names = ["envoy.filters.network.kafka_broker"], - deps = [ - "//source/extensions/filters/network/kafka:kafka_broker_filter_lib", - "//test/common/stats:stat_test_utility_lib", - "//test/extensions/filters/network/kafka:buffer_based_test_lib", - "//test/extensions/filters/network/kafka:message_utilities", - "//test/test_common:test_time_lib", - ], -) diff --git a/test/extensions/filters/network/kafka/mesh/BUILD b/test/extensions/filters/network/kafka/mesh/BUILD deleted file mode 100644 index ab7507ffef02c..0000000000000 --- a/test/extensions/filters/network/kafka/mesh/BUILD +++ /dev/null @@ -1,46 +0,0 @@ -load( - "//bazel:envoy_build_system.bzl", - "envoy_package", -) -load( - "//test/extensions:extensions_build_system.bzl", - "envoy_extension_cc_test", -) - -licenses(["notice"]) # Apache 2 - -envoy_package() - -envoy_extension_cc_test( - name = "filter_unit_test", - srcs = ["filter_unit_test.cc"], - # This name needs to be changed after we have the mesh filter ready. - extension_names = ["envoy.filters.network.kafka_broker"], - tags = ["skip_on_windows"], - deps = [ - "//source/extensions/filters/network/kafka/mesh:filter_lib", - "//test/mocks/network:network_mocks", - ], -) - -envoy_extension_cc_test( - name = "request_processor_unit_test", - srcs = ["request_processor_unit_test.cc"], - # This name needs to be changed after we have the mesh filter ready. 
- extension_names = ["envoy.filters.network.kafka_broker"], - tags = ["skip_on_windows"], - deps = [ - "//source/extensions/filters/network/kafka/mesh:request_processor_lib", - ], -) - -envoy_extension_cc_test( - name = "abstract_command_unit_test", - srcs = ["abstract_command_unit_test.cc"], - # This name needs to be changed after we have the mesh filter ready. - extension_names = ["envoy.filters.network.kafka_broker"], - tags = ["skip_on_windows"], - deps = [ - "//source/extensions/filters/network/kafka/mesh:abstract_command_lib", - ], -) diff --git a/test/extensions/filters/network/kafka/mesh/request_processor_unit_test.cc b/test/extensions/filters/network/kafka/mesh/request_processor_unit_test.cc deleted file mode 100644 index 45639eb9b223b..0000000000000 --- a/test/extensions/filters/network/kafka/mesh/request_processor_unit_test.cc +++ /dev/null @@ -1,45 +0,0 @@ -#include "source/extensions/filters/network/kafka/mesh/abstract_command.h" -#include "source/extensions/filters/network/kafka/mesh/request_processor.h" - -#include "test/test_common/utility.h" - -#include "gmock/gmock.h" -#include "gtest/gtest.h" - -namespace Envoy { -namespace Extensions { -namespace NetworkFilters { -namespace Kafka { -namespace Mesh { -namespace { - -class RequestProcessorTest : public testing::Test { -protected: - RequestProcessor testee_ = {}; -}; - -TEST_F(RequestProcessorTest, ShouldHandleUnsupportedRequest) { - // given - const RequestHeader header = {0, 0, 0, absl::nullopt}; - const ListOffsetRequest data = {0, {}}; - const auto message = std::make_shared>(header, data); - - // when, then - exception gets thrown. - EXPECT_THROW_WITH_REGEX(testee_.onMessage(message), EnvoyException, "unsupported"); -} - -TEST_F(RequestProcessorTest, ShouldHandleUnparseableRequest) { - // given - const RequestHeader header = {42, 42, 42, absl::nullopt}; - const auto arg = std::make_shared(header); - - // when, then - exception gets thrown. 
- EXPECT_THROW_WITH_REGEX(testee_.onFailedParse(arg), EnvoyException, "unknown"); -} - -} // anonymous namespace -} // namespace Mesh -} // namespace Kafka -} // namespace NetworkFilters -} // namespace Extensions -} // namespace Envoy diff --git a/test/extensions/filters/network/ratelimit/ratelimit_test.cc b/test/extensions/filters/network/ratelimit/ratelimit_test.cc index c59ba01e37a19..3be03981d4899 100644 --- a/test/extensions/filters/network/ratelimit/ratelimit_test.cc +++ b/test/extensions/filters/network/ratelimit/ratelimit_test.cc @@ -41,7 +41,7 @@ class RateLimitFilterTest : public testing::Test { .WillByDefault(Return(true)); envoy::extensions::filters::network::ratelimit::v3::RateLimit proto_config{}; - TestUtility::loadFromYaml(yaml, proto_config, false, true); + TestUtility::loadFromYaml(yaml, proto_config); config_ = std::make_shared(proto_config, stats_store_, runtime_); client_ = new Filters::Common::RateLimit::MockClient(); filter_ = std::make_unique(config_, Filters::Common::RateLimit::ClientPtr{client_}); diff --git a/test/extensions/filters/network/rbac/filter_test.cc b/test/extensions/filters/network/rbac/filter_test.cc index 0bcf52baa8d6f..0d62c804b5c59 100644 --- a/test/extensions/filters/network/rbac/filter_test.cc +++ b/test/extensions/filters/network/rbac/filter_test.cc @@ -65,7 +65,7 @@ class RoleBasedAccessControlNetworkFilterTest : public testing::Test { void setDestinationPort(uint16_t port) { address_ = Envoy::Network::Utility::parseInternetAddress("1.2.3.4", port, false); - stream_info_.downstream_address_provider_->setLocalAddress(address_); + stream_info_.downstream_connection_info_provider_->setLocalAddress(address_); } void setRequestedServerName(std::string server_name) { diff --git a/test/extensions/filters/network/redis_proxy/config_test.cc b/test/extensions/filters/network/redis_proxy/config_test.cc index b57c1c772549e..0286d6a5c5bc3 100644 --- a/test/extensions/filters/network/redis_proxy/config_test.cc +++ 
b/test/extensions/filters/network/redis_proxy/config_test.cc @@ -66,63 +66,6 @@ settings: {} ProtoValidationException, "embedded message failed validation"); } -TEST(RedisProxyFilterConfigFactoryTest, - DEPRECATED_FEATURE_TEST(RedisProxyCorrectProtoLegacyCluster)) { - TestDeprecatedV2Api _deprecated_v2_api; - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.deprecated_features:envoy.config.filter.network.redis_proxy.v2.RedisProxy.cluster", - "true"}, - {"envoy.deprecated_features:envoy.extensions.filters.network.redis_proxy.v3.RedisProxy." - "hidden_envoy_deprecated_cluster", - "true"}}); - - const std::string yaml = R"EOF( -cluster: fake_cluster -stat_prefix: foo -settings: - op_timeout: 0.02s - )EOF"; - - envoy::extensions::filters::network::redis_proxy::v3::RedisProxy proto_config{}; - TestUtility::loadFromYamlAndValidate(yaml, proto_config, true, false); - NiceMock context; - RedisProxyFilterConfigFactory factory; - Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context); - EXPECT_TRUE(factory.isTerminalFilterByProto(proto_config, context)); - Network::MockConnection connection; - EXPECT_CALL(connection, addReadFilter(_)); - cb(connection); -} - -TEST(RedisProxyFilterConfigFactoryTest, - DEPRECATED_FEATURE_TEST(RedisProxyCorrectProtoLegacyCatchAllCluster)) { - TestDeprecatedV2Api _deprecated_v2_api; - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.deprecated_features:envoy.config.filter.network.redis_proxy.v2.RedisProxy." - "PrefixRoutes.catch_all_cluster", - "true"}, - {"envoy.deprecated_features:envoy.extensions.filters.network.redis_proxy.v3.RedisProxy." 
- "PrefixRoutes.hidden_envoy_deprecated_catch_all_cluster", - "true"}}); - const std::string yaml = R"EOF( -prefix_routes: - catch_all_cluster: fake_cluster -stat_prefix: foo -settings: - op_timeout: 0.02s - )EOF"; - - envoy::extensions::filters::network::redis_proxy::v3::RedisProxy proto_config{}; - TestUtility::loadFromYamlAndValidate(yaml, proto_config, true, false); - NiceMock context; - RedisProxyFilterConfigFactory factory; - Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context); - EXPECT_TRUE(factory.isTerminalFilterByProto(proto_config, context)); - Network::MockConnection connection; - EXPECT_CALL(connection, addReadFilter(_)); - cb(connection); -} - TEST(RedisProxyFilterConfigFactoryTest, RedisProxyCorrectProto) { const std::string yaml = R"EOF( prefix_routes: diff --git a/test/extensions/filters/network/tcp_proxy/config_test.cc b/test/extensions/filters/network/tcp_proxy/config_test.cc index 201379a7ce6b0..4fabbeadcf224 100644 --- a/test/extensions/filters/network/tcp_proxy/config_test.cc +++ b/test/extensions/filters/network/tcp_proxy/config_test.cc @@ -19,94 +19,6 @@ namespace Extensions { namespace NetworkFilters { namespace TcpProxy { -class RouteIpListConfigTest : public testing::TestWithParam {}; - -INSTANTIATE_TEST_SUITE_P(IpList, RouteIpListConfigTest, - ::testing::Values(R"EOF("destination_ip_list": [ - { - "address_prefix": "192.168.1.1", - "prefix_len": 32 - }, - { - "address_prefix": "192.168.1.0", - "prefix_len": 24 - } - ], - "source_ip_list": [ - { - "address_prefix": "192.168.0.0", - "prefix_len": 16 - }, - { - "address_prefix": "192.0.0.0", - "prefix_len": 8 - }, - { - "address_prefix": "127.0.0.0", - "prefix_len": 8 - } - ],)EOF", - R"EOF("destination_ip_list": [ - { - "address_prefix": "2001:abcd::", - "prefix_len": 64 - }, - { - "address_prefix": "2002:ffff::", - "prefix_len": 32 - } - ], - "source_ip_list": [ - { - "address_prefix": "ffee::", - "prefix_len": 128 - }, - { - "address_prefix": 
"2001::abcd", - "prefix_len": 64 - }, - { - "address_prefix": "1234::5678", - "prefix_len": 128 - } - ],)EOF")); - -TEST_P(RouteIpListConfigTest, DEPRECATED_FEATURE_TEST(TcpProxy)) { - TestDeprecatedV2Api _deprecated_v2_api; - const std::string json_string = R"EOF( - { - "stat_prefix": "my_stat_prefix", - "cluster": "foobar", - "deprecated_v1": { - "routes": [ - {)EOF" + GetParam() + - R"EOF("destination_ports": "1-1024,2048-4096,12345", - "cluster": "fake_cluster" - }, - { - "source_ports": "23457,23459", - "cluster": "fake_cluster2" - } - ] - } - } - )EOF"; - - envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy proto_config; - TestUtility::loadFromJson(json_string, proto_config, true, false); - - NiceMock context; - ConfigFactory factory; - Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context); - Network::MockConnection connection; - NiceMock readFilterCallback; - EXPECT_CALL(connection, addReadFilter(_)) - .WillRepeatedly(Invoke([&readFilterCallback](Network::ReadFilterSharedPtr filter) { - filter->initializeReadFilterCallbacks(readFilterCallback); - })); - cb(connection); -} - TEST(ConfigTest, ValidateFail) { NiceMock context; EXPECT_THROW(ConfigFactory().createFilterFactoryFromProto( diff --git a/test/extensions/filters/network/thrift_proxy/BUILD b/test/extensions/filters/network/thrift_proxy/BUILD index 6e6cb31a06288..cd3fe562efe15 100644 --- a/test/extensions/filters/network/thrift_proxy/BUILD +++ b/test/extensions/filters/network/thrift_proxy/BUILD @@ -28,6 +28,7 @@ envoy_extension_cc_mock( "//source/extensions/filters/network/thrift_proxy/router:router_ratelimit_interface", "//test/mocks/network:network_mocks", "//test/mocks/stream_info:stream_info_mocks", + "//test/mocks/upstream:upstream_mocks", "//test/test_common:printers_lib", ], ) @@ -270,6 +271,7 @@ envoy_extension_cc_test( "//source/extensions/filters/network/thrift_proxy:config", "//source/extensions/filters/network/thrift_proxy/router:config", 
"//source/extensions/filters/network/thrift_proxy/router:router_lib", + "//source/extensions/filters/network/thrift_proxy/router:shadow_writer_lib", "//test/mocks/network:network_mocks", "//test/mocks/server:factory_context_mocks", "//test/mocks/upstream:host_mocks", @@ -360,3 +362,22 @@ envoy_extension_cc_test( "@envoy_api//envoy/extensions/filters/network/thrift_proxy/v3:pkg_cc_proto", ], ) + +envoy_extension_cc_test( + name = "shadow_writer_test", + srcs = ["shadow_writer_test.cc"], + extension_names = ["envoy.filters.network.thrift_proxy"], + deps = [ + ":mocks", + ":utility_lib", + "//source/extensions/filters/network/thrift_proxy:app_exception_lib", + "//source/extensions/filters/network/thrift_proxy:config", + "//source/extensions/filters/network/thrift_proxy/router:shadow_writer_lib", + "//test/mocks/network:network_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/upstream:host_mocks", + "//test/test_common:printers_lib", + "//test/test_common:registry_lib", + "@envoy_api//envoy/extensions/filters/network/thrift_proxy/v3:pkg_cc_proto", + ], +) diff --git a/test/extensions/filters/network/thrift_proxy/filters/ratelimit/config_test.cc b/test/extensions/filters/network/thrift_proxy/filters/ratelimit/config_test.cc index a41499d5f0aff..4e8c596a0647c 100644 --- a/test/extensions/filters/network/thrift_proxy/filters/ratelimit/config_test.cc +++ b/test/extensions/filters/network/thrift_proxy/filters/ratelimit/config_test.cc @@ -19,9 +19,9 @@ namespace RateLimitFilter { namespace { envoy::extensions::filters::network::thrift_proxy::filters::ratelimit::v3::RateLimit -parseRateLimitFromV3Yaml(const std::string& yaml, bool avoid_boosting = true) { +parseRateLimitFromV3Yaml(const std::string& yaml) { envoy::extensions::filters::network::thrift_proxy::filters::ratelimit::v3::RateLimit rate_limit; - TestUtility::loadFromYaml(yaml, rate_limit, false, avoid_boosting); + TestUtility::loadFromYaml(yaml, rate_limit); return rate_limit; } diff --git 
a/test/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit_test.cc b/test/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit_test.cc index 88ff099bce754..8e386fff3bc20 100644 --- a/test/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit_test.cc +++ b/test/extensions/filters/network/thrift_proxy/filters/ratelimit/ratelimit_test.cc @@ -53,7 +53,7 @@ class ThriftRateLimitFilterTest : public testing::Test { void setupTest(const std::string& yaml) { envoy::extensions::filters::network::thrift_proxy::filters::ratelimit::v3::RateLimit proto_config{}; - TestUtility::loadFromYaml(yaml, proto_config, false, true); + TestUtility::loadFromYaml(yaml, proto_config); config_ = std::make_shared(proto_config, local_info_, stats_store_, runtime_, cm_); diff --git a/test/extensions/filters/network/thrift_proxy/mocks.cc b/test/extensions/filters/network/thrift_proxy/mocks.cc index 8e93ace6f0c3e..e4a795a6fd0ce 100644 --- a/test/extensions/filters/network/thrift_proxy/mocks.cc +++ b/test/extensions/filters/network/thrift_proxy/mocks.cc @@ -130,12 +130,18 @@ MockRouteEntry::MockRouteEntry() { ON_CALL(*this, clusterName()).WillByDefault(ReturnRef(cluster_name_)); ON_CALL(*this, rateLimitPolicy()).WillByDefault(ReturnRef(rate_limit_policy_)); ON_CALL(*this, clusterHeader()).WillByDefault(ReturnRef(cluster_header_)); + ON_CALL(*this, requestMirrorPolicies()).WillByDefault(ReturnRef(policies_)); } MockRouteEntry::~MockRouteEntry() = default; MockRoute::MockRoute() { ON_CALL(*this, routeEntry()).WillByDefault(Return(&route_entry_)); } MockRoute::~MockRoute() = default; +MockShadowWriter::MockShadowWriter() { + ON_CALL(*this, submit(_, _, _, _)).WillByDefault(Return(router_handle_)); +} +MockShadowWriter::~MockShadowWriter() = default; + } // namespace Router } // namespace ThriftProxy } // namespace NetworkFilters diff --git a/test/extensions/filters/network/thrift_proxy/mocks.h b/test/extensions/filters/network/thrift_proxy/mocks.h index 
b55d4bc466a9c..b3eddda2cb352 100644 --- a/test/extensions/filters/network/thrift_proxy/mocks.h +++ b/test/extensions/filters/network/thrift_proxy/mocks.h @@ -14,6 +14,7 @@ #include "test/mocks/network/mocks.h" #include "test/mocks/stream_info/mocks.h" +#include "test/mocks/upstream/cluster_manager.h" #include "test/test_common/printers.h" #include "gmock/gmock.h" @@ -333,10 +334,13 @@ class MockRouteEntry : public RouteEntry { MOCK_METHOD(RateLimitPolicy&, rateLimitPolicy, (), (const)); MOCK_METHOD(bool, stripServiceName, (), (const)); MOCK_METHOD(const Http::LowerCaseString&, clusterHeader, (), (const)); + MOCK_METHOD(const std::vector>&, requestMirrorPolicies, (), + (const)); std::string cluster_name_{"fake_cluster"}; Http::LowerCaseString cluster_header_{""}; NiceMock rate_limit_policy_; + std::vector> policies_; }; class MockRoute : public Route { @@ -350,6 +354,21 @@ class MockRoute : public Route { NiceMock route_entry_; }; +class MockShadowWriter : public ShadowWriter { +public: + MockShadowWriter(); + ~MockShadowWriter() override; + + MOCK_METHOD(Upstream::ClusterManager&, clusterManager, (), ()); + MOCK_METHOD(std::string&, statPrefix, (), (const)); + MOCK_METHOD(Stats::Scope&, scope, (), ()); + MOCK_METHOD(Event::Dispatcher&, dispatcher, (), ()); + MOCK_METHOD(absl::optional>, submit, + (const std::string&, MessageMetadataSharedPtr, TransportType, ProtocolType), ()); + + absl::optional> router_handle_{absl::nullopt}; +}; + } // namespace Router } // namespace ThriftProxy } // namespace NetworkFilters diff --git a/test/extensions/filters/network/thrift_proxy/route_matcher_test.cc b/test/extensions/filters/network/thrift_proxy/route_matcher_test.cc index 8e19060b27e7e..4368eab78d72f 100644 --- a/test/extensions/filters/network/thrift_proxy/route_matcher_test.cc +++ b/test/extensions/filters/network/thrift_proxy/route_matcher_test.cc @@ -20,9 +20,9 @@ namespace Router { namespace { envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration 
-parseRouteConfigurationFromV3Yaml(const std::string& yaml, bool avoid_boosting = true) { +parseRouteConfigurationFromV3Yaml(const std::string& yaml) { envoy::extensions::filters::network::thrift_proxy::v3::RouteConfiguration route_config; - TestUtility::loadFromYaml(yaml, route_config, false, avoid_boosting); + TestUtility::loadFromYaml(yaml, route_config); TestUtility::validate(route_config); return route_config; } diff --git a/test/extensions/filters/network/thrift_proxy/router_ratelimit_test.cc b/test/extensions/filters/network/thrift_proxy/router_ratelimit_test.cc index 15c19bd2be7bb..d7be72660e9a4 100644 --- a/test/extensions/filters/network/thrift_proxy/router_ratelimit_test.cc +++ b/test/extensions/filters/network/thrift_proxy/router_ratelimit_test.cc @@ -31,9 +31,9 @@ namespace { class ThriftRateLimitConfigurationTest : public testing::Test { public: - void initialize(const std::string& yaml, bool avoid_boosting = true) { + void initialize(const std::string& yaml) { envoy::extensions::filters::network::thrift_proxy::v3::ThriftProxy config; - TestUtility::loadFromYaml(yaml, config, false, avoid_boosting); + TestUtility::loadFromYaml(yaml, config); initialize(config); } @@ -47,8 +47,8 @@ class ThriftRateLimitConfigurationTest : public testing::Test { return *metadata_; } - std::unique_ptr config_; NiceMock factory_context_; + std::unique_ptr config_; Network::Address::Ipv4Instance default_remote_address_{"10.0.0.1"}; MessageMetadataSharedPtr metadata_; }; diff --git a/test/extensions/filters/network/thrift_proxy/router_test.cc b/test/extensions/filters/network/thrift_proxy/router_test.cc index ada3531938a74..3bcbb4bff7d52 100644 --- a/test/extensions/filters/network/thrift_proxy/router_test.cc +++ b/test/extensions/filters/network/thrift_proxy/router_test.cc @@ -9,6 +9,7 @@ #include "source/extensions/filters/network/thrift_proxy/config.h" #include "source/extensions/filters/network/thrift_proxy/router/config.h" #include 
"source/extensions/filters/network/thrift_proxy/router/router_impl.h" +#include "source/extensions/filters/network/thrift_proxy/router/shadow_writer_impl.h" #include "test/extensions/filters/network/thrift_proxy/mocks.h" #include "test/extensions/filters/network/thrift_proxy/utility.h" @@ -67,43 +68,72 @@ class ThriftRouterTestBase { public: ThriftRouterTestBase() : transport_factory_([&]() -> MockTransport* { - ASSERT(transport_ == nullptr); - transport_ = new NiceMock(); - if (mock_transport_cb_) { - mock_transport_cb_(transport_); + // Create shadow transports. + auto transport = new NiceMock(); + transports_requested_++; + + // Ignore null response decoder transports. + bool is_response_transport = shadow_writer_impl_ != nullptr && + (transports_requested_ == 1 || transports_requested_ == 3); + if (!is_response_transport) { + if (mock_transport_cb_) { + mock_transport_cb_(transport); + } + all_transports_.push_back(transport); + transport_ = transport; } - return transport_; + + return transport; }), protocol_factory_([&]() -> MockProtocol* { - ASSERT(protocol_ == nullptr); - protocol_ = new NiceMock(); - if (mock_protocol_cb_) { - mock_protocol_cb_(protocol_); + // Create shadow protocols. + auto protocol = new NiceMock(); + protocols_requested_++; + + // Ditto for protocols. 
+ bool is_response_protocol = shadow_writer_impl_ != nullptr && + (protocols_requested_ == 1 || protocols_requested_ == 3); + if (!is_response_protocol) { + if (mock_protocol_cb_) { + mock_protocol_cb_(protocol); + } + all_protocols_.push_back(protocol); + protocol_ = protocol; } - return protocol_; + + return protocol; }), transport_register_(transport_factory_), protocol_register_(protocol_factory_) { context_.cluster_manager_.initializeThreadLocalClusters({"cluster"}); } - void initializeRouter() { + void initializeRouter(bool use_real_shadow_writer = false) { route_ = new NiceMock(); route_ptr_.reset(route_); - router_ = std::make_unique(context_.clusterManager(), "test", context_.scope()); + if (!use_real_shadow_writer) { + router_ = std::make_unique(context_.clusterManager(), "test", context_.scope(), + context_.runtime(), shadow_writer_); + } else { + shadow_writer_impl_ = std::make_shared( + context_.clusterManager(), "test", context_.scope(), dispatcher_, context_.threadLocal()); + router_ = std::make_unique(context_.clusterManager(), "test", context_.scope(), + context_.runtime(), *shadow_writer_impl_); + } EXPECT_EQ(nullptr, router_->downstreamConnection()); router_->setDecoderFilterCallbacks(callbacks_); } - void initializeMetadata(MessageType msg_type, std::string method = "method") { + void initializeMetadata(MessageType msg_type, std::string method = "method", + int32_t sequence_id = 1) { msg_type_ = msg_type; metadata_ = std::make_shared(); metadata_->setMethodName(method); metadata_->setMessageType(msg_type_); - metadata_->setSequenceId(1); + metadata_->setSequenceId(sequence_id); } void startRequest(MessageType msg_type, std::string method = "method", @@ -170,14 +200,14 @@ class ThriftRouterTestBase { EXPECT_NE(nullptr, upstream_callbacks_); } - void startRequestWithExistingConnection(MessageType msg_type) { + void startRequestWithExistingConnection(MessageType msg_type, int32_t sequence_id = 1) { EXPECT_EQ(FilterStatus::Continue, 
router_->transportBegin({})); EXPECT_CALL(callbacks_, route()).WillOnce(Return(route_ptr_)); EXPECT_CALL(*route_, routeEntry()).WillOnce(Return(&route_entry_)); EXPECT_CALL(route_entry_, clusterName()).WillRepeatedly(ReturnRef(cluster_name_)); - initializeMetadata(msg_type); + initializeMetadata(msg_type, "method", sequence_id); EXPECT_CALL(*context_.cluster_manager_.thread_local_cluster_.tcp_conn_pool_.connection_data_, addUpstreamCallbacks(_)) @@ -203,10 +233,10 @@ class ThriftRouterTestBase { EXPECT_EQ(nullptr, router_->downstreamHeaders()); EXPECT_CALL(callbacks_, downstreamTransportType()) - .Times(2) + .Times(1) .WillRepeatedly(Return(TransportType::Framed)); EXPECT_CALL(callbacks_, downstreamProtocolType()) - .Times(2) + .Times(1) .WillRepeatedly(Return(ProtocolType::Binary)); mock_protocol_cb_ = [&](MockProtocol* protocol) -> void { @@ -233,20 +263,28 @@ class ThriftRouterTestBase { } void sendTrivialStruct(FieldType field_type) { - EXPECT_CALL(*protocol_, writeStructBegin(_, "")); + for (auto& protocol : all_protocols_) { + EXPECT_CALL(*protocol, writeStructBegin(_, "")); + } EXPECT_EQ(FilterStatus::Continue, router_->structBegin({})); int16_t id = 1; - EXPECT_CALL(*protocol_, writeFieldBegin(_, "", field_type, id)); + for (auto& protocol : all_protocols_) { + EXPECT_CALL(*protocol, writeFieldBegin(_, "", field_type, id)); + } EXPECT_EQ(FilterStatus::Continue, router_->fieldBegin({}, field_type, id)); sendTrivialValue(field_type); - EXPECT_CALL(*protocol_, writeFieldEnd(_)); + for (auto& protocol : all_protocols_) { + EXPECT_CALL(*protocol, writeFieldEnd(_)); + } EXPECT_EQ(FilterStatus::Continue, router_->fieldEnd()); - EXPECT_CALL(*protocol_, writeFieldBegin(_, "", FieldType::Stop, 0)); - EXPECT_CALL(*protocol_, writeStructEnd(_)); + for (auto& protocol : all_protocols_) { + EXPECT_CALL(*protocol, writeFieldBegin(_, "", FieldType::Stop, 0)); + EXPECT_CALL(*protocol, writeStructEnd(_)); + } EXPECT_EQ(FilterStatus::Continue, router_->structEnd()); } @@ 
-254,37 +292,51 @@ class ThriftRouterTestBase { switch (field_type) { case FieldType::Bool: { bool v = true; - EXPECT_CALL(*protocol_, writeBool(_, v)); + for (auto& protocol : all_protocols_) { + EXPECT_CALL(*protocol, writeBool(_, v)); + } EXPECT_EQ(FilterStatus::Continue, router_->boolValue(v)); } break; case FieldType::Byte: { uint8_t v = 2; - EXPECT_CALL(*protocol_, writeByte(_, v)); + for (auto& protocol : all_protocols_) { + EXPECT_CALL(*protocol, writeByte(_, v)); + } EXPECT_EQ(FilterStatus::Continue, router_->byteValue(v)); } break; case FieldType::I16: { int16_t v = 3; - EXPECT_CALL(*protocol_, writeInt16(_, v)); + for (auto& protocol : all_protocols_) { + EXPECT_CALL(*protocol, writeInt16(_, v)); + } EXPECT_EQ(FilterStatus::Continue, router_->int16Value(v)); } break; case FieldType::I32: { int32_t v = 4; - EXPECT_CALL(*protocol_, writeInt32(_, v)); + for (auto& protocol : all_protocols_) { + EXPECT_CALL(*protocol, writeInt32(_, v)); + } EXPECT_EQ(FilterStatus::Continue, router_->int32Value(v)); } break; case FieldType::I64: { int64_t v = 5; - EXPECT_CALL(*protocol_, writeInt64(_, v)); + for (auto& protocol : all_protocols_) { + EXPECT_CALL(*protocol, writeInt64(_, v)); + } EXPECT_EQ(FilterStatus::Continue, router_->int64Value(v)); } break; case FieldType::Double: { double v = 6.0; - EXPECT_CALL(*protocol_, writeDouble(_, v)); + for (auto& protocol : all_protocols_) { + EXPECT_CALL(*protocol, writeDouble(_, v)); + } EXPECT_EQ(FilterStatus::Continue, router_->doubleValue(v)); } break; case FieldType::String: { std::string v = "seven"; - EXPECT_CALL(*protocol_, writeString(_, v)); + for (auto& protocol : all_protocols_) { + EXPECT_CALL(*protocol, writeString(_, v)); + } EXPECT_EQ(FilterStatus::Continue, router_->stringValue(v)); } break; default: @@ -292,9 +344,94 @@ class ThriftRouterTestBase { } } + void sendTrivialMap() { + FieldType container_type = FieldType::I32; + uint32_t size = 2; + + for (auto& protocol : all_protocols_) { + EXPECT_CALL(*protocol, 
writeMapBegin(_, container_type, container_type, size)); + } + EXPECT_EQ(FilterStatus::Continue, router_->mapBegin(container_type, container_type, size)); + + for (int i = 0; i < 2; i++) { + for (auto& protocol : all_protocols_) { + EXPECT_CALL(*protocol, writeInt32(_, i)); + } + EXPECT_EQ(FilterStatus::Continue, router_->int32Value(i)); + + int j = i + 100; + for (auto& protocol : all_protocols_) { + EXPECT_CALL(*protocol, writeInt32(_, j)); + } + EXPECT_EQ(FilterStatus::Continue, router_->int32Value(j)); + } + + for (auto& protocol : all_protocols_) { + EXPECT_CALL(*protocol, writeMapEnd(_)); + } + EXPECT_EQ(FilterStatus::Continue, router_->mapEnd()); + } + + void sendTrivialList() { + FieldType container_type = FieldType::I32; + uint32_t size = 3; + + for (auto& protocol : all_protocols_) { + EXPECT_CALL(*protocol, writeListBegin(_, container_type, size)); + } + EXPECT_EQ(FilterStatus::Continue, router_->listBegin(container_type, size)); + + for (int i = 0; i < 3; i++) { + for (auto& protocol : all_protocols_) { + EXPECT_CALL(*protocol, writeInt32(_, i)); + } + EXPECT_EQ(FilterStatus::Continue, router_->int32Value(i)); + } + + for (auto& protocol : all_protocols_) { + EXPECT_CALL(*protocol, writeListEnd(_)); + } + EXPECT_EQ(FilterStatus::Continue, router_->listEnd()); + } + + void sendTrivialSet() { + FieldType container_type = FieldType::I32; + uint32_t size = 4; + + for (auto& protocol : all_protocols_) { + EXPECT_CALL(*protocol, writeSetBegin(_, container_type, size)); + } + EXPECT_EQ(FilterStatus::Continue, router_->setBegin(container_type, size)); + + for (int i = 0; i < 4; i++) { + for (auto& protocol : all_protocols_) { + EXPECT_CALL(*protocol, writeInt32(_, i)); + } + EXPECT_EQ(FilterStatus::Continue, router_->int32Value(i)); + } + + for (auto& protocol : all_protocols_) { + EXPECT_CALL(*protocol, writeSetEnd(_)); + } + EXPECT_EQ(FilterStatus::Continue, router_->setEnd()); + } + + void sendPassthroughData() { + Buffer::OwnedImpl buffer; + 
buffer.add("hello"); + + EXPECT_EQ(FilterStatus::Continue, router_->passthroughData(buffer)); + } + void completeRequest() { - EXPECT_CALL(*protocol_, writeMessageEnd(_)); - EXPECT_CALL(*transport_, encodeFrame(_, _, _)); + for (auto& protocol : all_protocols_) { + EXPECT_CALL(*protocol, writeMessageEnd(_)); + } + + for (auto& transport : all_transports_) { + EXPECT_CALL(*transport, encodeFrame(_, _, _)); + } + EXPECT_CALL(upstream_connection_, write(_, false)); if (msg_type_ == MessageType::Oneway) { @@ -341,20 +478,28 @@ class ThriftRouterTestBase { std::function mock_transport_cb_{}; std::function mock_protocol_cb_{}; + NiceMock dispatcher_; NiceMock context_; + + std::unique_ptr router_; + MockShadowWriter shadow_writer_; + std::shared_ptr shadow_writer_impl_; + NiceMock connection_; - NiceMock dispatcher_; NiceMock time_source_; NiceMock callbacks_; NiceMock* transport_{}; NiceMock* protocol_{}; + std::vector*> all_transports_{}; + std::vector*> all_protocols_{}; + int32_t transports_requested_{}; + int32_t protocols_requested_{}; NiceMock* route_{}; NiceMock route_entry_; NiceMock* host_{}; Tcp::ConnectionPool::ConnectionStatePtr conn_state_; RouteConstSharedPtr route_ptr_; - std::unique_ptr router_; std::string cluster_name_{"cluster"}; @@ -1393,6 +1538,71 @@ TEST_F(ThriftRouterTest, RequestResponseSize) { destroyRouter(); } +TEST_F(ThriftRouterTest, ShadowRequests) { + struct ShadowClusterInfo { + NiceMock cluster; + NiceMock connection; + Tcp::ConnectionPool::ConnectionStatePtr conn_state; + }; + using ShadowClusterInfoPtr = std::shared_ptr; + absl::flat_hash_map shadow_clusters; + + shadow_clusters.try_emplace("shadow_cluster_1", std::make_shared()); + shadow_clusters.try_emplace("shadow_cluster_2", std::make_shared()); + + for (auto& [name, shadow_cluster_info] : shadow_clusters) { + auto& shadow_cluster = shadow_cluster_info->cluster; + auto& upstream_connection = shadow_cluster_info->connection; + auto& conn_state = shadow_cluster_info->conn_state; + + 
ON_CALL(context_.cluster_manager_, getThreadLocalCluster(absl::string_view(name))) + .WillByDefault(Return(&shadow_cluster)); + EXPECT_CALL(shadow_cluster.tcp_conn_pool_, newConnection(_)) + .WillOnce( + Invoke([&](Tcp::ConnectionPool::Callbacks& cb) -> Tcp::ConnectionPool::Cancellable* { + shadow_cluster.tcp_conn_pool_.newConnectionImpl(cb); + shadow_cluster.tcp_conn_pool_.poolReady(upstream_connection); + return nullptr; + })); + EXPECT_CALL(upstream_connection, close(_)); + + EXPECT_CALL(*shadow_cluster.tcp_conn_pool_.connection_data_, connectionState()) + .WillRepeatedly( + Invoke([&]() -> Tcp::ConnectionPool::ConnectionState* { return conn_state.get(); })); + EXPECT_CALL(*shadow_cluster.tcp_conn_pool_.connection_data_, setConnectionState_(_)) + .WillOnce(Invoke( + [&](Tcp::ConnectionPool::ConnectionStatePtr& cs) -> void { conn_state.swap(cs); })); + + // Set up policies. + envoy::type::v3::FractionalPercent default_value; + auto policy = std::make_shared(name, "", default_value); + route_entry_.policies_.push_back(policy); + } + + initializeRouter(true); + + // Set sequence id to 0, since that's what the new connections used for shadow requests will use. 
+ startRequestWithExistingConnection(MessageType::Call, 0); + + std::vector field_types = {FieldType::Bool, FieldType::Byte, FieldType::I16, + FieldType::I32, FieldType::I64, FieldType::Double, + FieldType::String}; + for (const auto& field_type : field_types) { + sendTrivialStruct(field_type); + } + + sendTrivialMap(); + sendTrivialList(); + sendTrivialSet(); + sendPassthroughData(); + + completeRequest(); + returnResponse(); + destroyRouter(); + + shadow_writer_impl_ = nullptr; +} + } // namespace Router } // namespace ThriftProxy } // namespace NetworkFilters diff --git a/test/extensions/filters/network/thrift_proxy/shadow_writer_test.cc b/test/extensions/filters/network/thrift_proxy/shadow_writer_test.cc new file mode 100644 index 0000000000000..5532da024e32c --- /dev/null +++ b/test/extensions/filters/network/thrift_proxy/shadow_writer_test.cc @@ -0,0 +1,471 @@ +#include + +#include "envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.pb.h" +#include "envoy/tcp/conn_pool.h" + +#include "source/common/buffer/buffer_impl.h" +#include "source/extensions/filters/network/thrift_proxy/app_exception_impl.h" +#include "source/extensions/filters/network/thrift_proxy/router/shadow_writer_impl.h" + +#include "test/extensions/filters/network/thrift_proxy/mocks.h" +#include "test/extensions/filters/network/thrift_proxy/utility.h" +#include "test/mocks/network/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/mocks/upstream/host.h" +#include "test/test_common/printers.h" +#include "test/test_common/registry.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::Return; +using testing::ReturnRef; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace ThriftProxy { +namespace Router { + +struct MockNullResponseDecoder : public NullResponseDecoder { + MockNullResponseDecoder(Transport& transport, Protocol& protocol) + : NullResponseDecoder(transport, protocol) {} + + 
MOCK_METHOD(ThriftFilters::ResponseStatus, upstreamData, (Buffer::Instance & data), ()); +}; + +class ShadowWriterTest : public testing::Test { +public: + ShadowWriterTest() { + shadow_writer_ = std::make_shared(cm_, "test", context_.scope(), dispatcher_, + context_.threadLocal()); + metadata_ = std::make_shared(); + metadata_->setMethodName("ping"); + metadata_->setMessageType(MessageType::Call); + metadata_->setSequenceId(1); + + host_ = std::make_shared>(); + } + + void testPoolReady(bool oneway = false) { + NiceMock connection; + + EXPECT_CALL(cm_, getThreadLocalCluster(_)).WillOnce(Return(&cluster_)); + EXPECT_CALL(*cluster_.cluster_.info_, maintenanceMode()).WillOnce(Return(false)); + EXPECT_CALL(cluster_, tcpConnPool(_, _)) + .WillOnce(Return(Upstream::TcpPoolData([]() {}, &conn_pool_))); + EXPECT_CALL(conn_pool_, newConnection(_)) + .WillOnce(Invoke( + [&](Tcp::ConnectionPool::Callbacks& callbacks) -> Tcp::ConnectionPool::Cancellable* { + auto data = + std::make_unique>(); + EXPECT_CALL(*data, connectionState()) + .WillRepeatedly(Invoke([&]() -> Tcp::ConnectionPool::ConnectionState* { + return conn_state_.get(); + })); + EXPECT_CALL(*data, setConnectionState_(_)) + .WillOnce(Invoke([&](Tcp::ConnectionPool::ConnectionStatePtr& cs) -> void { + conn_state_.swap(cs); + })); + EXPECT_CALL(*data, connection()).WillRepeatedly(ReturnRef(connection)); + callbacks.onPoolReady(std::move(data), host_); + return nullptr; + })); + + auto router_handle = shadow_writer_->submit("shadow_cluster", metadata_, TransportType::Framed, + ProtocolType::Binary); + EXPECT_NE(absl::nullopt, router_handle); + EXPECT_CALL(connection, write(_, false)); + + auto& request_owner = router_handle.value().get().requestOwner(); + runRequestMethods(request_owner); + + // The following is a no-op, since no callbacks are pending. 
+ request_owner.continueDecoding(); + + if (!oneway) { + EXPECT_CALL(connection, close(_)); + } + + shadow_writer_ = nullptr; + + const std::string counter_name = + oneway ? "thrift.upstream_rq_oneway" : "thrift.upstream_rq_call"; + EXPECT_EQ(1UL, cluster_.cluster_.info_->statsScope().counterFromString(counter_name).value()); + } + + void testOnUpstreamData(MessageType message_type = MessageType::Reply, bool success = true, + bool on_data_throw_app_exception = false, + bool on_data_throw_regular_exception = false, + bool close_before_response = false) { + NiceMock connection; + + EXPECT_CALL(cm_, getThreadLocalCluster(_)).WillOnce(Return(&cluster_)); + EXPECT_CALL(*cluster_.cluster_.info_, maintenanceMode()).WillOnce(Return(false)); + EXPECT_CALL(cluster_, tcpConnPool(_, _)) + .WillOnce(Return(Upstream::TcpPoolData([]() {}, &conn_pool_))); + EXPECT_CALL(conn_pool_, newConnection(_)) + .WillOnce(Invoke( + [&](Tcp::ConnectionPool::Callbacks& callbacks) -> Tcp::ConnectionPool::Cancellable* { + auto data = + std::make_unique>(); + EXPECT_CALL(*data, connectionState()) + .WillRepeatedly(Invoke([&]() -> Tcp::ConnectionPool::ConnectionState* { + return conn_state_.get(); + })); + EXPECT_CALL(*data, setConnectionState_(_)) + .WillOnce(Invoke([&](Tcp::ConnectionPool::ConnectionStatePtr& cs) -> void { + conn_state_.swap(cs); + })); + + EXPECT_CALL(*data, connection()).WillRepeatedly(ReturnRef(connection)); + callbacks.onPoolReady(std::move(data), host_); + return nullptr; + })); + + ShadowRouterImpl shadow_router(*shadow_writer_, "shadow_cluster", metadata_, + TransportType::Framed, ProtocolType::Binary); + EXPECT_TRUE(shadow_router.createUpstreamRequest()); + + // Exercise methods are no-ops by design. 
+ shadow_router.resetDownstreamConnection(); + shadow_router.onAboveWriteBufferHighWatermark(); + shadow_router.onBelowWriteBufferLowWatermark(); + shadow_router.downstreamConnection(); + shadow_router.metadataMatchCriteria(); + + EXPECT_CALL(connection, write(_, false)); + shadow_router.messageEnd(); + + if (close_before_response) { + shadow_router.onEvent(Network::ConnectionEvent::LocalClose); + return; + } + + // Prepare response metadata & data processing. + MessageMetadataSharedPtr response_metadata = std::make_shared(); + response_metadata->setMessageType(message_type); + response_metadata->setSequenceId(1); + + auto transport_ptr = + NamedTransportConfigFactory::getFactory(TransportType::Framed).createTransport(); + auto protocol_ptr = + NamedProtocolConfigFactory::getFactory(ProtocolType::Binary).createProtocol(); + auto decoder_ptr = std::make_unique(*transport_ptr, *protocol_ptr); + decoder_ptr->messageBegin(response_metadata); + decoder_ptr->success_ = success; + + if (on_data_throw_regular_exception || on_data_throw_app_exception) { + EXPECT_CALL(connection, close(_)); + EXPECT_CALL(*decoder_ptr, upstreamData(_)) + .WillOnce(Return(ThriftFilters::ResponseStatus::Reset)); + } else { + EXPECT_CALL(*decoder_ptr, upstreamData(_)) + .WillOnce(Return(ThriftFilters::ResponseStatus::Complete)); + } + + shadow_router.upstream_response_callbacks_ = + std::make_unique(*decoder_ptr); + + Buffer::OwnedImpl response_buffer; + shadow_router.onUpstreamData(response_buffer, false); + + if (on_data_throw_regular_exception || on_data_throw_app_exception) { + return; + } + + // Check stats. 
+ switch (message_type) { + case MessageType::Reply: + EXPECT_EQ(1UL, cluster_.cluster_.info_->statsScope() + .counterFromString("thrift.upstream_resp_reply") + .value()); + if (success) { + EXPECT_EQ(1UL, cluster_.cluster_.info_->statsScope() + .counterFromString("thrift.upstream_resp_success") + .value()); + } else { + EXPECT_EQ(1UL, cluster_.cluster_.info_->statsScope() + .counterFromString("thrift.upstream_resp_error") + .value()); + } + break; + case MessageType::Exception: + EXPECT_EQ(1UL, cluster_.cluster_.info_->statsScope() + .counterFromString("thrift.upstream_resp_exception") + .value()); + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + } + + void runRequestMethods(RequestOwner& request_owner) { + Buffer::OwnedImpl passthrough_data; + FieldType field_type; + FieldType key_type; + FieldType value_type; + int16_t field_id = 0; + bool bool_value = false; + uint8_t byte_value = 0; + int16_t int16_value = 0; + int32_t int32_value = 0; + int64_t int64_value = 0; + double double_value = 0.0; + uint32_t container_size = 1; + + EXPECT_EQ(FilterStatus::Continue, request_owner.transportBegin(nullptr)); + EXPECT_EQ(FilterStatus::Continue, request_owner.passthroughData(passthrough_data)); + EXPECT_EQ(FilterStatus::Continue, request_owner.structBegin("")); + EXPECT_EQ(FilterStatus::Continue, request_owner.fieldBegin("", field_type, field_id)); + EXPECT_EQ(FilterStatus::Continue, request_owner.fieldEnd()); + EXPECT_EQ(FilterStatus::Continue, request_owner.structEnd()); + EXPECT_EQ(FilterStatus::Continue, request_owner.boolValue(bool_value)); + EXPECT_EQ(FilterStatus::Continue, request_owner.byteValue(byte_value)); + EXPECT_EQ(FilterStatus::Continue, request_owner.int16Value(int16_value)); + EXPECT_EQ(FilterStatus::Continue, request_owner.int32Value(int32_value)); + EXPECT_EQ(FilterStatus::Continue, request_owner.int64Value(int64_value)); + EXPECT_EQ(FilterStatus::Continue, request_owner.doubleValue(double_value)); + EXPECT_EQ(FilterStatus::Continue, 
request_owner.stringValue("")); + EXPECT_EQ(FilterStatus::Continue, request_owner.mapBegin(key_type, value_type, container_size)); + EXPECT_EQ(FilterStatus::Continue, request_owner.mapEnd()); + EXPECT_EQ(FilterStatus::Continue, request_owner.listBegin(field_type, container_size)); + EXPECT_EQ(FilterStatus::Continue, request_owner.listEnd()); + EXPECT_EQ(FilterStatus::Continue, request_owner.setBegin(field_type, container_size)); + EXPECT_EQ(FilterStatus::Continue, request_owner.setEnd()); + EXPECT_EQ(FilterStatus::Continue, request_owner.messageEnd()); + EXPECT_EQ(FilterStatus::Continue, request_owner.transportEnd()); + } + + NiceMock cluster_; + Tcp::ConnectionPool::ConnectionStatePtr conn_state_; + NiceMock cm_; + NiceMock context_; + NiceMock dispatcher_; + Envoy::ConnectionPool::MockCancellable cancellable_; + MessageMetadataSharedPtr metadata_; + NiceMock conn_pool_; + std::shared_ptr> host_; + std::shared_ptr shadow_writer_; +}; + +TEST_F(ShadowWriterTest, SubmitClusterNotFound) { + EXPECT_CALL(cm_, getThreadLocalCluster(_)).WillOnce(Return(nullptr)); + auto router_handle = shadow_writer_->submit("shadow_cluster", metadata_, TransportType::Framed, + ProtocolType::Binary); + EXPECT_EQ(absl::nullopt, router_handle); + EXPECT_EQ(1U, context_.scope().counterFromString("test.shadow_request_submit_failure").value()); +} + +TEST_F(ShadowWriterTest, SubmitClusterInMaintenance) { + std::shared_ptr cluster = + std::make_shared>(); + EXPECT_CALL(*cluster->cluster_.info_, maintenanceMode()).WillOnce(Return(true)); + EXPECT_CALL(cm_, getThreadLocalCluster(_)).WillOnce(Return(cluster.get())); + auto router_handle = shadow_writer_->submit("shadow_cluster", metadata_, TransportType::Framed, + ProtocolType::Binary); + EXPECT_EQ(absl::nullopt, router_handle); + EXPECT_EQ(1U, context_.scope().counterFromString("test.shadow_request_submit_failure").value()); +} + +TEST_F(ShadowWriterTest, SubmitNoHealthyUpstream) { + metadata_->setMessageType(MessageType::Oneway); + + 
std::shared_ptr cluster = + std::make_shared>(); + EXPECT_CALL(cm_, getThreadLocalCluster(_)).WillOnce(Return(cluster.get())); + EXPECT_CALL(*cluster->cluster_.info_, maintenanceMode()).WillOnce(Return(false)); + EXPECT_CALL(*cluster, tcpConnPool(_, _)).WillOnce(Return(absl::nullopt)); + auto router_handle = shadow_writer_->submit("shadow_cluster", metadata_, TransportType::Framed, + ProtocolType::Binary); + EXPECT_EQ(absl::nullopt, router_handle); + EXPECT_EQ(1U, context_.scope().counterFromString("test.shadow_request_submit_failure").value()); + + // We still count the request, even if it didn't go through. + EXPECT_EQ( + 1UL, + cluster->cluster_.info_->statsScope().counterFromString("thrift.upstream_rq_oneway").value()); +} + +TEST_F(ShadowWriterTest, SubmitConnectionNotReady) { + EXPECT_CALL(cm_, getThreadLocalCluster(_)).WillOnce(Return(&cluster_)); + EXPECT_CALL(*cluster_.cluster_.info_, maintenanceMode()).WillOnce(Return(false)); + EXPECT_CALL(cluster_, tcpConnPool(_, _)) + .WillOnce(Return(Upstream::TcpPoolData([]() {}, &conn_pool_))); + EXPECT_CALL(cancellable_, cancel(_)); + EXPECT_CALL(conn_pool_, newConnection(_)) + .WillOnce(Invoke([&](Tcp::ConnectionPool::Callbacks&) -> Tcp::ConnectionPool::Cancellable* { + return &cancellable_; + })); + auto router_handle = shadow_writer_->submit("shadow_cluster", metadata_, TransportType::Framed, + ProtocolType::Binary); + EXPECT_NE(absl::nullopt, router_handle); + EXPECT_TRUE(router_handle.value().get().waitingForConnection()); + + EXPECT_EQ( + 1UL, + cluster_.cluster_.info_->statsScope().counterFromString("thrift.upstream_rq_call").value()); +} + +TEST_F(ShadowWriterTest, ShadowRequestPoolReady) { testPoolReady(); } + +TEST_F(ShadowWriterTest, ShadowRequestPoolReadyOneWay) { + metadata_->setMessageType(MessageType::Oneway); + testPoolReady(true); +} + +TEST_F(ShadowWriterTest, ShadowRequestWriteBeforePoolReady) { + Tcp::ConnectionPool::Callbacks* callbacks; + + EXPECT_CALL(cm_, 
getThreadLocalCluster(_)).WillOnce(Return(&cluster_)); + EXPECT_CALL(*cluster_.cluster_.info_, maintenanceMode()).WillOnce(Return(false)); + EXPECT_CALL(cluster_, tcpConnPool(_, _)) + .WillOnce(Return(Upstream::TcpPoolData([]() {}, &conn_pool_))); + EXPECT_CALL(conn_pool_, newConnection(_)) + .WillOnce( + Invoke([&](Tcp::ConnectionPool::Callbacks& cb) -> Tcp::ConnectionPool::Cancellable* { + callbacks = &cb; + return &cancellable_; + })); + + auto router_handle = shadow_writer_->submit("shadow_cluster", metadata_, TransportType::Framed, + ProtocolType::Binary); + EXPECT_NE(absl::nullopt, router_handle); + + // Write before connection is ready. + auto& request_owner = router_handle.value().get().requestOwner(); + runRequestMethods(request_owner); + + NiceMock connection; + auto data = std::make_unique>(); + EXPECT_CALL(*data, connection()).WillRepeatedly(ReturnRef(connection)); + EXPECT_CALL(*data, connectionState()) + .WillRepeatedly( + Invoke([&]() -> Tcp::ConnectionPool::ConnectionState* { return conn_state_.get(); })); + EXPECT_CALL(*data, setConnectionState_(_)) + .WillOnce(Invoke( + [&](Tcp::ConnectionPool::ConnectionStatePtr& cs) -> void { conn_state_.swap(cs); })); + + EXPECT_CALL(connection, write(_, false)); + callbacks->onPoolReady(std::move(data), host_); + + EXPECT_CALL(connection, close(_)); + shadow_writer_ = nullptr; + + EXPECT_EQ( + 1UL, + cluster_.cluster_.info_->statsScope().counterFromString("thrift.upstream_rq_call").value()); +} + +TEST_F(ShadowWriterTest, ShadowRequestPoolFailure) { + EXPECT_CALL(cm_, getThreadLocalCluster(_)).WillOnce(Return(&cluster_)); + EXPECT_CALL(*cluster_.cluster_.info_, maintenanceMode()).WillOnce(Return(false)); + EXPECT_CALL(cluster_, tcpConnPool(_, _)) + .WillOnce(Return(Upstream::TcpPoolData([]() {}, &conn_pool_))); + EXPECT_CALL(conn_pool_, newConnection(_)) + .WillOnce(Invoke([&](Tcp::ConnectionPool::Callbacks& callbacks) + -> Tcp::ConnectionPool::Cancellable* { + auto data = std::make_unique>(); + 
EXPECT_CALL(*data, connection()).Times(0); + callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Overflow, "failure", nullptr); + return nullptr; + })); + + auto router_handle = shadow_writer_->submit("shadow_cluster", metadata_, TransportType::Framed, + ProtocolType::Binary); + EXPECT_NE(absl::nullopt, router_handle); + router_handle.value().get().requestOwner().messageEnd(); +} + +TEST_F(ShadowWriterTest, ShadowRequestOnUpstreamDataReplySuccess) { + testOnUpstreamData(MessageType::Reply, true); +} + +TEST_F(ShadowWriterTest, ShadowRequestOnUpstreamDataReplyError) { + testOnUpstreamData(MessageType::Reply, false); +} + +TEST_F(ShadowWriterTest, ShadowRequestOnUpstreamDataReplyException) { + testOnUpstreamData(MessageType::Reply, false); +} + +TEST_F(ShadowWriterTest, ShadowRequestOnUpstreamDataAppException) { + testOnUpstreamData(MessageType::Reply, false, true, false); +} + +TEST_F(ShadowWriterTest, ShadowRequestOnUpstreamDataRegularException) { + testOnUpstreamData(MessageType::Reply, false, false, true); +} + +TEST_F(ShadowWriterTest, ShadowRequestOnUpstreamRemoteClose) { + testOnUpstreamData(MessageType::Reply, false, false, false, true); +} + +TEST_F(ShadowWriterTest, TestNullResponseDecoder) { + auto transport_ptr = + NamedTransportConfigFactory::getFactory(TransportType::Framed).createTransport(); + auto protocol_ptr = NamedProtocolConfigFactory::getFactory(ProtocolType::Binary).createProtocol(); + auto decoder_ptr = std::make_unique(*transport_ptr, *protocol_ptr); + + decoder_ptr->newDecoderEventHandler(); + EXPECT_TRUE(decoder_ptr->passthroughEnabled()); + + metadata_->setMessageType(MessageType::Reply); + EXPECT_EQ(FilterStatus::Continue, decoder_ptr->messageBegin(metadata_)); + + Buffer::OwnedImpl buffer; + decoder_ptr->upstreamData(buffer); + + EXPECT_EQ(FilterStatus::Continue, decoder_ptr->messageEnd()); + + // First reply field. 
+ { + FieldType field_type; + int16_t field_id = 0; + EXPECT_EQ(FilterStatus::Continue, decoder_ptr->messageBegin(metadata_)); + EXPECT_EQ(FilterStatus::Continue, decoder_ptr->fieldBegin("", field_type, field_id)); + EXPECT_TRUE(decoder_ptr->responseSuccess()); + } + + EXPECT_EQ(FilterStatus::Continue, decoder_ptr->transportBegin(nullptr)); + EXPECT_EQ(FilterStatus::Continue, decoder_ptr->transportEnd()); +} + +struct MockOnDataNullResponseDecoder : public NullResponseDecoder { + MockOnDataNullResponseDecoder(Transport& transport, Protocol& protocol) + : NullResponseDecoder(transport, protocol) {} + + MOCK_METHOD(bool, onData, (), ()); +}; + +TEST_F(ShadowWriterTest, NullResponseDecoderExceptionHandling) { + auto transport_ptr = + NamedTransportConfigFactory::getFactory(TransportType::Framed).createTransport(); + auto protocol_ptr = NamedProtocolConfigFactory::getFactory(ProtocolType::Binary).createProtocol(); + auto decoder_ptr = std::make_unique(*transport_ptr, *protocol_ptr); + + { + EXPECT_CALL(*decoder_ptr, onData()).WillOnce(Invoke([&]() -> bool { + throw EnvoyException("exception"); + })); + + Buffer::OwnedImpl buffer; + EXPECT_EQ(ThriftFilters::ResponseStatus::Reset, decoder_ptr->upstreamData(buffer)); + } + + { + EXPECT_CALL(*decoder_ptr, onData()).WillOnce(Invoke([&]() -> bool { + throw AppException(AppExceptionType::InternalError, "exception"); + })); + + Buffer::OwnedImpl buffer; + EXPECT_EQ(ThriftFilters::ResponseStatus::Reset, decoder_ptr->upstreamData(buffer)); + } +} + +} // namespace Router +} // namespace ThriftProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/zookeeper_proxy/config_test.cc b/test/extensions/filters/network/zookeeper_proxy/config_test.cc index e88dbacd1ab06..a465c5c47eff5 100644 --- a/test/extensions/filters/network/zookeeper_proxy/config_test.cc +++ b/test/extensions/filters/network/zookeeper_proxy/config_test.cc @@ -49,7 +49,7 @@ stat_prefix: 
test_prefix )EOF"; ZooKeeperProxyProtoConfig proto_config; - TestUtility::loadFromYamlAndValidate(yaml, proto_config, false, true); + TestUtility::loadFromYamlAndValidate(yaml, proto_config); testing::NiceMock context; ZooKeeperConfigFactory factory; diff --git a/test/extensions/formatter/metadata/BUILD b/test/extensions/formatter/metadata/BUILD new file mode 100644 index 0000000000000..8889de88085cf --- /dev/null +++ b/test/extensions/formatter/metadata/BUILD @@ -0,0 +1,28 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_package", +) +load( + "//test/extensions:extensions_build_system.bzl", + "envoy_extension_cc_test", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_extension_cc_test( + name = "metadata_test", + srcs = ["metadata_test.cc"], + extension_names = ["envoy.formatter.metadata"], + deps = [ + "//source/common/formatter:substitution_formatter_lib", + "//source/common/json:json_loader_lib", + "//source/extensions/formatter/metadata:config", + "//source/extensions/formatter/metadata:metadata_lib", + "//test/mocks/server:factory_context_mocks", + "//test/mocks/stream_info:stream_info_mocks", + "//test/test_common:test_runtime_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) diff --git a/test/extensions/formatter/metadata/metadata_test.cc b/test/extensions/formatter/metadata/metadata_test.cc new file mode 100644 index 0000000000000..a58fa44abd50e --- /dev/null +++ b/test/extensions/formatter/metadata/metadata_test.cc @@ -0,0 +1,115 @@ +#include "envoy/config/core/v3/substitution_format_string.pb.validate.h" + +#include "source/common/formatter/substitution_format_string.h" +#include "source/common/formatter/substitution_formatter.h" + +#include "test/mocks/server/factory_context.h" +#include "test/mocks/stream_info/mocks.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace Formatter { + +class MetadataFormatterTest : 
public ::testing::Test { +public: + MetadataFormatterTest() { + // Create metadata object with test values. + ProtobufWkt::Struct struct_obj; + auto& fields_map = *struct_obj.mutable_fields(); + fields_map["test_key"] = ValueUtil::stringValue("test_value"); + (*metadata_.mutable_filter_metadata())["metadata.test"] = struct_obj; + } + + // Method creates a yaml config for specific access log METADATA type. + ::Envoy::Formatter::FormatterPtr getTestMetadataFormatter(std::string type, + std::string tag = "METADATA") { + const std::string yaml = fmt::format(R"EOF( + text_format_source: + inline_string: "%{}({}:metadata.test:test_key)%" + formatters: + - name: envoy.formatter.metadata + typed_config: + "@type": type.googleapis.com/envoy.extensions.formatter.metadata.v3.Metadata +)EOF", + tag, type); + TestUtility::loadFromYaml(yaml, config_); + return Envoy::Formatter::SubstitutionFormatStringUtils::fromProtoConfig(config_, context_); + } + + Http::TestRequestHeaderMapImpl request_headers_; + Http::TestResponseHeaderMapImpl response_headers_; + Http::TestResponseTrailerMapImpl response_trailers_; + StreamInfo::MockStreamInfo stream_info_; + std::string body_; + + envoy::config::core::v3::SubstitutionFormatString config_; + NiceMock context_; + envoy::config::core::v3::Metadata metadata_; +}; + +// Exception should be thrown for tags different than METADATA. +TEST_F(MetadataFormatterTest, IncorrectTag) { + EXPECT_THROW(getTestMetadataFormatter("ROUTE", "BLAH_BLAH"), EnvoyException); +} + +// Exception should be thrown for unknown type of metadata. +TEST_F(MetadataFormatterTest, NonExistingMetadataProvider) { + EXPECT_THROW(getTestMetadataFormatter("BLAH"), EnvoyException); +} + +// Extensive testing of Dynamic Metadata formatter is in +// test/common/formatter/substitution_formatter_test.cc file. +// Here just make sure that METADATA(DYNAMIC .... returns +// Dynamic Metadata formatter and dynamicMetadata() is called. 
+TEST_F(MetadataFormatterTest, DynamicMetadata) { + // Make sure that formatter accesses dynamic metadata. + EXPECT_CALL(testing::Const(stream_info_), dynamicMetadata()) + .WillRepeatedly(testing::ReturnRef(metadata_)); + + EXPECT_EQ("test_value", + getTestMetadataFormatter("DYNAMIC")->format(request_headers_, response_headers_, + response_trailers_, stream_info_, body_)); +} + +// Extensive testing of Cluster Metadata formatter is in +// test/common/formatter/substitution_formatter_test.cc file. +// Here just make sure that METADATA(CLUSTER .... accesses +// cluster's metadata object. +TEST_F(MetadataFormatterTest, ClusterMetadata) { + // Make sure that formatter accesses cluster metadata. + absl::optional>> cluster = + std::make_shared>(); + EXPECT_CALL(**cluster, metadata()).WillRepeatedly(testing::ReturnRef(metadata_)); + EXPECT_CALL(stream_info_, upstreamClusterInfo()).WillRepeatedly(testing::ReturnPointee(cluster)); + + EXPECT_EQ("test_value", + getTestMetadataFormatter("CLUSTER")->format(request_headers_, response_headers_, + response_trailers_, stream_info_, body_)); +} + +// Test that METADATA(ROUTE accesses stream_info's Route. +TEST_F(MetadataFormatterTest, RouteMetadata) { + std::shared_ptr route{new NiceMock()}; + EXPECT_CALL(*route, metadata()).WillRepeatedly(testing::ReturnRef(metadata_)); + EXPECT_CALL(stream_info_, route()).WillRepeatedly(testing::Return(route)); + + EXPECT_EQ("test_value", + getTestMetadataFormatter("ROUTE")->format(request_headers_, response_headers_, + response_trailers_, stream_info_, body_)); +} + +// Make sure that code handles nullptr returned for stream_info::route(). 
+TEST_F(MetadataFormatterTest, NonExistentRouteMetadata) { + EXPECT_CALL(stream_info_, route()).WillRepeatedly(testing::Return(nullptr)); + + EXPECT_EQ("-", getTestMetadataFormatter("ROUTE")->format( + request_headers_, response_headers_, response_trailers_, stream_info_, body_)); +} + +} // namespace Formatter +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/health_checkers/redis/config_test.cc b/test/extensions/health_checkers/redis/config_test.cc index fbd5db3ee52e0..644bf0538e264 100644 --- a/test/extensions/health_checkers/redis/config_test.cc +++ b/test/extensions/health_checkers/redis/config_test.cc @@ -21,31 +21,6 @@ namespace { using CustomRedisHealthChecker = Extensions::HealthCheckers::RedisHealthChecker::RedisHealthChecker; -TEST(HealthCheckerFactoryTest, DEPRECATED_FEATURE_TEST(CreateRedisDeprecated)) { - TestDeprecatedV2Api _deprecated_v2_api; - const std::string yaml = R"EOF( - timeout: 1s - interval: 1s - no_traffic_interval: 5s - interval_jitter: 1s - unhealthy_threshold: 1 - healthy_threshold: 1 - custom_health_check: - name: envoy.health_checkers.redis - config: - key: foo - )EOF"; - - NiceMock context; - - RedisHealthCheckerFactory factory; - EXPECT_NE(nullptr, dynamic_cast( - factory - .createCustomHealthChecker( - Upstream::parseHealthCheckFromV3Yaml(yaml, false), context) - .get())); -} - TEST(HealthCheckerFactoryTest, CreateRedis) { const std::string yaml = R"EOF( timeout: 1s @@ -71,30 +46,6 @@ TEST(HealthCheckerFactoryTest, CreateRedis) { .get())); } -TEST(HealthCheckerFactoryTest, DEPRECATED_FEATURE_TEST(CreateRedisWithoutKeyDeprecated)) { - TestDeprecatedV2Api _deprecated_v2_api; - const std::string yaml = R"EOF( - timeout: 1s - interval: 1s - no_traffic_interval: 5s - interval_jitter: 1s - unhealthy_threshold: 1 - healthy_threshold: 1 - custom_health_check: - name: envoy.health_checkers.redis - config: - )EOF"; - - NiceMock context; - - RedisHealthCheckerFactory factory; - EXPECT_NE(nullptr, dynamic_cast( - 
factory - .createCustomHealthChecker( - Upstream::parseHealthCheckFromV3Yaml(yaml, false), context) - .get())); -} - TEST(HealthCheckerFactoryTest, CreateRedisWithoutKey) { const std::string yaml = R"EOF( timeout: 1s diff --git a/test/extensions/health_checkers/redis/redis_test.cc b/test/extensions/health_checkers/redis/redis_test.cc index a5c012bbf8bbf..f0e43e0015d2d 100644 --- a/test/extensions/health_checkers/redis/redis_test.cc +++ b/test/extensions/health_checkers/redis/redis_test.cc @@ -186,29 +186,6 @@ class RedisHealthCheckerTest Upstream::HealthCheckEventLoggerPtr(event_logger_), *api_, *this); } - void setupExistsHealthcheckDeprecated(bool avoid_boosting = true) { - const std::string yaml = R"EOF( - timeout: 1s - interval: 1s - no_traffic_interval: 5s - interval_jitter: 1s - unhealthy_threshold: 1 - healthy_threshold: 1 - custom_health_check: - name: envoy.health_checkers.redis - config: - key: foo - )EOF"; - - const auto& health_check_config = Upstream::parseHealthCheckFromV3Yaml(yaml, avoid_boosting); - const auto& redis_config = getRedisHealthCheckConfig( - health_check_config, ProtobufMessage::getStrictValidationVisitor()); - - health_checker_ = std::make_shared( - *cluster_, health_check_config, redis_config, dispatcher_, runtime_, - Upstream::HealthCheckEventLoggerPtr(event_logger_), *api_, *this); - } - void setupDontReuseConnection() { const std::string yaml = R"EOF( timeout: 1s @@ -575,59 +552,6 @@ TEST_F(RedisHealthCheckerTest, LogInitialFailure) { EXPECT_EQ(1UL, cluster_->info_->stats_store_.counter("health_check.network_failure").value()); } -TEST_F(RedisHealthCheckerTest, DEPRECATED_FEATURE_TEST(ExistsDeprecated)) { - TestDeprecatedV2Api _deprecated_v2_api; - InSequence s; - setupExistsHealthcheckDeprecated(false); - - cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - Upstream::makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; - - expectSessionCreate(); - expectClientCreate(); - expectExistsRequestCreate(); - 
health_checker_->start(); - - client_->runHighWatermarkCallbacks(); - client_->runLowWatermarkCallbacks(); - - // Success - EXPECT_CALL(*timeout_timer_, disableTimer()); - EXPECT_CALL(*interval_timer_, enableTimer(_, _)); - NetworkFilters::Common::Redis::RespValuePtr response( - new NetworkFilters::Common::Redis::RespValue()); - response->type(NetworkFilters::Common::Redis::RespType::Integer); - response->asInteger() = 0; - pool_callbacks_->onResponse(std::move(response)); - - expectExistsRequestCreate(); - interval_timer_->invokeCallback(); - - // Failure, exists - EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); - EXPECT_CALL(*timeout_timer_, disableTimer()); - EXPECT_CALL(*interval_timer_, enableTimer(_, _)); - response = std::make_unique(); - response->type(NetworkFilters::Common::Redis::RespType::Integer); - response->asInteger() = 1; - pool_callbacks_->onResponse(std::move(response)); - - expectExistsRequestCreate(); - interval_timer_->invokeCallback(); - - // Failure, no value - EXPECT_CALL(*timeout_timer_, disableTimer()); - EXPECT_CALL(*interval_timer_, enableTimer(_, _)); - response = std::make_unique(); - pool_callbacks_->onResponse(std::move(response)); - - EXPECT_CALL(*client_, close()); - - EXPECT_EQ(3UL, cluster_->info_->stats_store_.counter("health_check.attempt").value()); - EXPECT_EQ(1UL, cluster_->info_->stats_store_.counter("health_check.success").value()); - EXPECT_EQ(2UL, cluster_->info_->stats_store_.counter("health_check.failure").value()); -} - TEST_F(RedisHealthCheckerTest, Exists) { InSequence s; setupExistsHealthcheck(); diff --git a/test/extensions/io_socket/user_space/BUILD b/test/extensions/io_socket/user_space/BUILD index 989d56ec9466a..458d6345d36da 100644 --- a/test/extensions/io_socket/user_space/BUILD +++ b/test/extensions/io_socket/user_space/BUILD @@ -39,3 +39,22 @@ envoy_extension_cc_test( "//test/mocks/event:event_mocks", ], ) + +envoy_extension_cc_test( + name = "connection_compatbility_test", + srcs = 
["connection_compatbility_test.cc"], + extension_names = ["envoy.io_socket.user_space"], + deps = [ + "//source/common/buffer:buffer_lib", + "//source/common/common:utility_lib", + "//source/common/event:dispatcher_includes", + "//source/common/network:address_lib", + "//source/common/network:connection_lib", + "//source/common/network:listen_socket_lib", + "//source/extensions/io_socket/user_space:io_handle_impl_lib", + "//test/mocks/api:api_mocks", + "//test/mocks/event:event_mocks", + "//test/mocks/network:network_mocks", + "//test/test_common:network_utility_lib", + ], +) diff --git a/test/extensions/io_socket/user_space/connection_compatbility_test.cc b/test/extensions/io_socket/user_space/connection_compatbility_test.cc new file mode 100644 index 0000000000000..e7410aa7bb3dc --- /dev/null +++ b/test/extensions/io_socket/user_space/connection_compatbility_test.cc @@ -0,0 +1,100 @@ +#include + +#include "source/common/network/address_impl.h" +#include "source/common/network/connection_impl.h" +#include "source/common/network/io_socket_handle_impl.h" +#include "source/common/network/listen_socket_impl.h" +#include "source/common/network/raw_buffer_socket.h" +#include "source/common/network/utility.h" +#include "source/extensions/io_socket/user_space/io_handle_impl.h" + +#include "test/mocks/api/mocks.h" +#include "test/mocks/event/mocks.h" +#include "test/mocks/network/mocks.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::Invoke; + +namespace Envoy { +namespace Extensions { +namespace IoSocket { +namespace UserSpace { +namespace { + +// This class verifies client connection can be established with user space socket. 
+class InternalClientConnectionImplTest : public testing::Test { +public: + InternalClientConnectionImplTest() + : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher("test_thread")) {} + + void SetUp() override { + std::tie(io_handle_, io_handle_peer_) = IoHandleFactory::createIoHandlePair(); + local_addr_ = io_handle_->localAddress(); + remote_addr_ = io_handle_->peerAddress(); + } + Api::ApiPtr api_; + Event::DispatcherPtr dispatcher_; + std::unique_ptr io_handle_; + std::unique_ptr io_handle_peer_; + Network::MockConnectionCallbacks connection_callbacks; + std::unique_ptr client_; + Network::Address::InstanceConstSharedPtr local_addr_; + Network::Address::InstanceConstSharedPtr remote_addr_; +}; + +TEST_F(InternalClientConnectionImplTest, Basic) { + client_ = std::make_unique( + *dispatcher_, + std::make_unique(std::move(io_handle_), local_addr_, + remote_addr_), + nullptr, std::make_unique(), nullptr); + client_->connect(); + client_->noDelay(true); + dispatcher_->run(Event::Dispatcher::RunType::Block); + + client_->close(Network::ConnectionCloseType::NoFlush); +} + +TEST_F(InternalClientConnectionImplTest, ConnectCallbacksAreInvoked) { + client_ = std::make_unique( + *dispatcher_, + std::make_unique(std::move(io_handle_), local_addr_, + remote_addr_), + nullptr, std::make_unique(), nullptr); + client_->addConnectionCallbacks(connection_callbacks); + client_->connect(); + client_->noDelay(true); + EXPECT_CALL(connection_callbacks, onEvent(_)) + .WillOnce(Invoke([&](Network::ConnectionEvent event) -> void { + EXPECT_EQ(event, Network::ConnectionEvent::Connected); + dispatcher_->exit(); + })); + dispatcher_->run(Event::Dispatcher::RunType::Block); + EXPECT_CALL(connection_callbacks, onEvent(Network::ConnectionEvent::LocalClose)); + + client_->close(Network::ConnectionCloseType::NoFlush); +} + +TEST_F(InternalClientConnectionImplTest, ConnectFailed) { + client_ = std::make_unique( + *dispatcher_, + std::make_unique(std::move(io_handle_), 
local_addr_, + remote_addr_), + nullptr, std::make_unique(), nullptr); + client_->addConnectionCallbacks(connection_callbacks); + client_->connect(); + client_->noDelay(true); + + io_handle_peer_->close(); + EXPECT_CALL(connection_callbacks, onEvent(Network::ConnectionEvent::RemoteClose)); + dispatcher_->run(Event::Dispatcher::RunType::Block); + + client_->close(Network::ConnectionCloseType::NoFlush); +} +} // namespace +} // namespace UserSpace +} // namespace IoSocket +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/io_socket/user_space/io_handle_impl_test.cc b/test/extensions/io_socket/user_space/io_handle_impl_test.cc index fab734bb395d6..d75246348a8f3 100644 --- a/test/extensions/io_socket/user_space/io_handle_impl_test.cc +++ b/test/extensions/io_socket/user_space/io_handle_impl_test.cc @@ -20,6 +20,8 @@ namespace IoSocket { namespace UserSpace { namespace { +constexpr int CONNECTED = 0; + MATCHER(IsInvalidAddress, "") { return arg.err_->getErrorCode() == Api::IoError::IoErrorCode::NoSupport; } @@ -1007,6 +1009,41 @@ TEST_F(IoHandleImplTest, Connect) { auto address_is_ignored = std::make_shared("listener_id"); EXPECT_EQ(0, io_handle_->connect(address_is_ignored).return_value_); + + // Below is emulation of the connect(). + int immediate_error_value = -1; + socklen_t error_value_len = 0; + EXPECT_EQ(0, io_handle_->getOption(SOL_SOCKET, SO_ERROR, &immediate_error_value, &error_value_len) + .return_value_); + EXPECT_EQ(sizeof(int), error_value_len); + EXPECT_EQ(CONNECTED, immediate_error_value); + + // If the peer shutdown write but not yet closes, this io_handle should consider it + // as connected because the socket may be readable. 
+ immediate_error_value = -1; + error_value_len = 0; + EXPECT_EQ(io_handle_peer_->shutdown(ENVOY_SHUT_WR).return_value_, 0); + EXPECT_EQ(0, io_handle_->getOption(SOL_SOCKET, SO_ERROR, &immediate_error_value, &error_value_len) + .return_value_); + EXPECT_EQ(sizeof(int), error_value_len); + EXPECT_EQ(CONNECTED, immediate_error_value); +} + +TEST_F(IoHandleImplTest, ConnectToClosedIoHandle) { + auto address_is_ignored = + std::make_shared("listener_id"); + io_handle_peer_->close(); + auto result = io_handle_->connect(address_is_ignored); + EXPECT_EQ(-1, result.return_value_); + EXPECT_EQ(SOCKET_ERROR_INVAL, result.errno_); + + // Below is emulation of the connect(). + int immediate_error_value = -1; + socklen_t error_value_len = 0; + EXPECT_EQ(0, io_handle_->getOption(SOL_SOCKET, SO_ERROR, &immediate_error_value, &error_value_len) + .return_value_); + EXPECT_EQ(sizeof(int), error_value_len); + EXPECT_NE(CONNECTED, immediate_error_value); } TEST_F(IoHandleImplTest, ActivateEvent) { diff --git a/test/extensions/key_value/file_based/BUILD b/test/extensions/key_value/file_based/BUILD new file mode 100644 index 0000000000000..d55e4a2866e35 --- /dev/null +++ b/test/extensions/key_value/file_based/BUILD @@ -0,0 +1,20 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_test( + name = "key_value_store_test", + srcs = ["key_value_store_test.cc"], + deps = [ + "//source/common/common:key_value_store_lib", + "//source/extensions/key_value/file_based:config_lib", + "//test/mocks/event:event_mocks", + "//test/test_common:file_system_for_test_lib", + ], +) diff --git a/test/extensions/key_value/file_based/key_value_store_test.cc b/test/extensions/key_value/file_based/key_value_store_test.cc new file mode 100644 index 0000000000000..b743c90a81858 --- /dev/null +++ b/test/extensions/key_value/file_based/key_value_store_test.cc @@ -0,0 +1,113 @@ +#include +#include + +#include 
"source/common/common/key_value_store_base.h" +#include "source/extensions/key_value/file_based/config.h" + +#include "test/mocks/event/mocks.h" +#include "test/test_common/environment.h" +#include "test/test_common/file_system_for_test.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::NiceMock; + +namespace Envoy { +namespace Extensions { +namespace KeyValue { +namespace { + +class KeyValueStoreTest : public testing::Test { +protected: + KeyValueStoreTest() : filename_(TestEnvironment::temporaryPath("key_value_store")) { + TestEnvironment::removePath(filename_); + createStore(); + } + + void createStore() { + store_ = std::make_unique(dispatcher_, std::chrono::seconds{5}, + Filesystem::fileSystemForTest(), filename_); + } + NiceMock dispatcher_; + std::string filename_; + std::unique_ptr store_{}; +}; + +TEST_F(KeyValueStoreTest, Basic) { + EXPECT_EQ(absl::nullopt, store_->get("foo")); + store_->addOrUpdate("foo", "bar"); + EXPECT_EQ("bar", store_->get("foo").value()); + store_->addOrUpdate("foo", "eep"); + EXPECT_EQ("eep", store_->get("foo").value()); + store_->remove("foo"); + EXPECT_EQ(absl::nullopt, store_->get("foo")); +} + +TEST_F(KeyValueStoreTest, Persist) { + store_->addOrUpdate("foo", "bar"); + store_->addOrUpdate("ba\nz", "ee\np"); + store_->flush(); + + createStore(); + + KeyValueStore::ConstIterateCb validate = [](const std::string& key, const std::string&) { + EXPECT_TRUE(key == "foo" || key == "ba\nz"); + return KeyValueStore::Iterate::Continue; + }; + + EXPECT_EQ("bar", store_->get("foo").value()); + EXPECT_EQ("ee\np", store_->get("ba\nz").value()); + store_->iterate(validate); +} + +TEST_F(KeyValueStoreTest, Iterate) { + store_->addOrUpdate("foo", "bar"); + store_->addOrUpdate("baz", "eep"); + + int full_counter = 0; + KeyValueStore::ConstIterateCb validate = [&full_counter](const std::string& key, + const std::string&) { + ++full_counter; + EXPECT_TRUE(key == "foo" || key == "baz"); + return 
KeyValueStore::Iterate::Continue; + }; + store_->iterate(validate); + EXPECT_EQ(2, full_counter); + + int stop_early_counter = 0; + KeyValueStore::ConstIterateCb stop_early = [&stop_early_counter](const std::string&, + const std::string&) { + ++stop_early_counter; + return KeyValueStore::Iterate::Break; + }; + store_->iterate(stop_early); + EXPECT_EQ(1, stop_early_counter); +} + +TEST_F(KeyValueStoreTest, HandleBadFile) { + auto checkBadFile = [this](std::string file, std::string error) { + TestEnvironment::writeStringToFileForTest(filename_, file, true); + EXPECT_LOG_CONTAINS("warn", error, createStore()); + + // File will be parsed up until error. + EXPECT_EQ("bar", store_->get("foo").value()); + }; + checkBadFile("3\nfoo3\nbar3", "Bad file: no newline"); + checkBadFile("3\nfoo3\nbar\n", "Bad file: no length"); + checkBadFile("3\nfoo3\nbarasd\n", "Bad file: no length"); + checkBadFile("3\nfoo3\nbar3\na", "Bad file: insufficient contents"); +} + +#ifndef WIN32 +TEST_F(KeyValueStoreTest, HandleInvalidFile) { + filename_ = "/foo"; + createStore(); + EXPECT_LOG_CONTAINS("error", "Failed to flush cache to file /foo", store_->flush()); +} +#endif + +} // namespace +} // namespace KeyValue +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/rate_limit_descriptors/expr/config_test.cc b/test/extensions/rate_limit_descriptors/expr/config_test.cc index 57094c6805e62..805a45bf5fc45 100644 --- a/test/extensions/rate_limit_descriptors/expr/config_test.cc +++ b/test/extensions/rate_limit_descriptors/expr/config_test.cc @@ -26,7 +26,7 @@ class RateLimitPolicyEntryTest : public testing::Test { public: void setupTest(const std::string& yaml) { envoy::config::route::v3::RateLimit rate_limit; - TestUtility::loadFromYaml(yaml, rate_limit, false, true); + TestUtility::loadFromYaml(yaml, rate_limit); TestUtility::validate(rate_limit); rate_limit_entry_ = std::make_unique( rate_limit, ProtobufMessage::getStrictValidationVisitor()); diff --git 
a/test/extensions/stats_sinks/hystrix/hystrix_test.cc b/test/extensions/stats_sinks/hystrix/hystrix_test.cc index 59d205a387731..98299a662dc01 100644 --- a/test/extensions/stats_sinks/hystrix/hystrix_test.cc +++ b/test/extensions/stats_sinks/hystrix/hystrix_test.cc @@ -521,7 +521,8 @@ TEST_F(HystrixSinkTest, HystrixEventStreamHandler) { ON_CALL(admin_stream_mock, http1StreamEncoderOptions()) .WillByDefault(Return(Http::Http1StreamEncoderOptionsOptRef(stream_encoder_options))); ON_CALL(callbacks_, connection()).WillByDefault(Return(&connection_mock)); - connection_mock.stream_info_.downstream_address_provider_->setRemoteAddress(addr_instance_); + connection_mock.stream_info_.downstream_connection_info_provider_->setRemoteAddress( + addr_instance_); EXPECT_CALL(stream_encoder_options, disableChunkEncoding()); ASSERT_EQ(sink_->handlerHystrixEventStream(path_and_query, response_headers, diff --git a/test/extensions/stats_sinks/metrics_service/grpc_metrics_service_impl_test.cc b/test/extensions/stats_sinks/metrics_service/grpc_metrics_service_impl_test.cc index 5f1a6724dc8e0..136b4b83d85cd 100644 --- a/test/extensions/stats_sinks/metrics_service/grpc_metrics_service_impl_test.cc +++ b/test/extensions/stats_sinks/metrics_service/grpc_metrics_service_impl_test.cc @@ -32,8 +32,7 @@ class GrpcMetricsStreamerImplTest : public testing::Test { GrpcMetricsStreamerImplTest() { streamer_ = std::make_unique( - Grpc::RawAsyncClientSharedPtr{async_client_}, local_info_, - envoy::config::core::v3::ApiVersion::AUTO); + Grpc::RawAsyncClientSharedPtr{async_client_}, local_info_); } void expectStreamStart(MockMetricsStream& stream, MetricsServiceCallbacks** callbacks_to_set) { diff --git a/test/extensions/stats_sinks/metrics_service/metrics_service_integration_test.cc b/test/extensions/stats_sinks/metrics_service/metrics_service_integration_test.cc index 2f43d87e8cc77..8c7d8a16a854b 100644 --- a/test/extensions/stats_sinks/metrics_service/metrics_service_integration_test.cc +++ 
b/test/extensions/stats_sinks/metrics_service/metrics_service_integration_test.cc @@ -18,7 +18,7 @@ using testing::AssertionResult; namespace Envoy { namespace { -class MetricsServiceIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, +class MetricsServiceIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, public HttpIntegrationTest { public: MetricsServiceIntegrationTest() : HttpIntegrationTest(Http::CodecType::HTTP1, ipVersion()) {} @@ -29,9 +29,6 @@ class MetricsServiceIntegrationTest : public Grpc::VersionedGrpcClientIntegratio } void initialize() override { - if (apiVersion() != envoy::config::core::v3::ApiVersion::V3) { - config_helper_.enableDeprecatedV2Api(); - } config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { // metrics_service cluster for Envoy gRPC. auto* metrics_service_cluster = bootstrap.mutable_static_resources()->add_clusters(); @@ -44,7 +41,7 @@ class MetricsServiceIntegrationTest : public Grpc::VersionedGrpcClientIntegratio envoy::config::metrics::v3::MetricsServiceConfig config; setGrpcService(*config.mutable_grpc_service(), "metrics_service", fake_upstreams_.back()->localAddress()); - config.set_transport_api_version(apiVersion()); + config.set_transport_api_version(envoy::config::core::v3::ApiVersion::V3); metrics_sink->mutable_typed_config()->PackFrom(config); // Shrink reporting period down to 1s to make test not take forever. 
bootstrap.mutable_stats_flush_interval()->CopyFrom( @@ -82,8 +79,7 @@ class MetricsServiceIntegrationTest : public Grpc::VersionedGrpcClientIntegratio envoy::service::metrics::v3::StreamMetricsMessage request_msg; VERIFY_ASSERTION(metrics_service_request_->waitForGrpcMessage(*dispatcher_, request_msg)); EXPECT_EQ("POST", metrics_service_request_->headers().getMethodValue()); - EXPECT_EQ(TestUtility::getVersionedMethodPath("envoy.service.metrics.{}.MetricsService", - "StreamMetrics", apiVersion()), + EXPECT_EQ("/envoy.service.metrics.v3.MetricsService/StreamMetrics", metrics_service_request_->headers().getPathValue()); EXPECT_EQ("application/grpc", metrics_service_request_->headers().getContentTypeValue()); EXPECT_TRUE(request_msg.envoy_metrics_size() > 0); @@ -149,12 +145,11 @@ class MetricsServiceIntegrationTest : public Grpc::VersionedGrpcClientIntegratio }; INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, MetricsServiceIntegrationTest, - VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS, - Grpc::VersionedGrpcClientIntegrationParamTest::protocolTestParamsToString); + GRPC_CLIENT_INTEGRATION_PARAMS, + Grpc::GrpcClientIntegrationParamTest::protocolTestParamsToString); // Test a basic metric service flow. TEST_P(MetricsServiceIntegrationTest, BasicFlow) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; initialize(); // Send an empty request so that histogram values merged for cluster_0. 
codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); diff --git a/test/extensions/tracers/zipkin/config_test.cc b/test/extensions/tracers/zipkin/config_test.cc index c3f9a71e46e68..6c95bf964b4e4 100644 --- a/test/extensions/tracers/zipkin/config_test.cc +++ b/test/extensions/tracers/zipkin/config_test.cc @@ -29,7 +29,7 @@ TEST(ZipkinTracerConfigTest, ZipkinHttpTracer) { typed_config: "@type": type.googleapis.com/envoy.config.trace.v3.ZipkinConfig collector_cluster: fake_cluster - collector_endpoint: /api/v1/spans + collector_endpoint: /api/v2/spans collector_endpoint_version: HTTP_JSON )EOF"; diff --git a/test/extensions/tracers/zipkin/span_buffer_test.cc b/test/extensions/tracers/zipkin/span_buffer_test.cc index 9ccb9c7b3fc03..92c8a7960b864 100644 --- a/test/extensions/tracers/zipkin/span_buffer_test.cc +++ b/test/extensions/tracers/zipkin/span_buffer_test.cc @@ -157,68 +157,72 @@ TEST(ZipkinSpanBufferTest, TestSerializeTimestamp) { } TEST(ZipkinSpanBufferTest, ConstructBuffer) { - const std::string expected1 = - withDefaultTimestampAndDuration(R"([{"traceId":"0000000000000001",)" - R"("name":"",)" - R"("id":"0000000000000001",)" - R"("duration":DEFAULT_TEST_DURATION,)" - R"("annotations":[{"timestamp":ANNOTATION_TEST_TIMESTAMP,)" - R"("value":"cs",)" - R"("endpoint":{"ipv4":"1.2.3.4",)" - R"("port":8080,)" - R"("serviceName":"service1"}},)" - R"({"timestamp":ANNOTATION_TEST_TIMESTAMP,)" - R"("value":"sr",)" - R"("endpoint":{"ipv4":"1.2.3.4",)" - R"("port":8080,)" - R"("serviceName":"service1"}}],)" - R"("binaryAnnotations":[{"key":"response_size",)" - R"("value":"DEFAULT_TEST_DURATION"}]}])"); - - const std::string expected2 = - withDefaultTimestampAndDuration(R"([{"traceId":"0000000000000001",)" - R"("name":"",)" - R"("id":"0000000000000001",)" - R"("duration":DEFAULT_TEST_DURATION,)" - R"("annotations":[{"timestamp":ANNOTATION_TEST_TIMESTAMP,)" - R"("value":"cs",)" - R"("endpoint":{"ipv4":"1.2.3.4",)" - R"("port":8080,)" - 
R"("serviceName":"service1"}},)" - R"({"timestamp":ANNOTATION_TEST_TIMESTAMP,)" - R"("value":"sr",)" - R"("endpoint":{"ipv4":"1.2.3.4",)" - R"("port":8080,)" - R"("serviceName":"service1"}}],)" - R"("binaryAnnotations":[{"key":"response_size",)" - R"("value":"DEFAULT_TEST_DURATION"}]},)" - R"({"traceId":"0000000000000001",)" - R"("name":"",)" - R"("id":"0000000000000001",)" - R"("duration":DEFAULT_TEST_DURATION,)" - R"("annotations":[{"timestamp":ANNOTATION_TEST_TIMESTAMP,)" - R"("value":"cs",)" - R"("endpoint":{"ipv4":"1.2.3.4",)" - R"("port":8080,)" - R"("serviceName":"service1"}},)" - R"({"timestamp":ANNOTATION_TEST_TIMESTAMP,)" - R"("value":"sr",)" - R"("endpoint":{"ipv4":"1.2.3.4",)" - R"("port":8080,)" - R"("serviceName":"service1"}}],)" - R"("binaryAnnotations":[{"key":"response_size",)" - R"("value":"DEFAULT_TEST_DURATION"}]}])"); + const std::string expected = "[{" + R"("traceId":"0000000000000001",)" + R"("id":"0000000000000001",)" + R"("kind":"CLIENT",)" + R"("timestamp":ANNOTATION_TEST_TIMESTAMP,)" + R"("duration":DEFAULT_TEST_DURATION,)" + R"("localEndpoint":{)" + R"("serviceName":"service1",)" + R"("ipv4":"1.2.3.4",)" + R"("port":8080},)" + R"("tags":{)" + R"("response_size":"DEFAULT_TEST_DURATION"}},)" + R"({)" + R"("traceId":"0000000000000001",)" + R"("id":"0000000000000001",)" + R"("kind":"SERVER",)" + R"("timestamp":ANNOTATION_TEST_TIMESTAMP,)" + R"("duration":DEFAULT_TEST_DURATION,)" + R"("localEndpoint":{)" + R"("serviceName":"service1",)" + R"("ipv4":"1.2.3.4",)" + R"("port":8080},)" + R"("tags":{)" + R"("response_size":"DEFAULT_TEST_DURATION"},)" + R"("shared":true)" + "}]"; const bool shared = true; const bool delay_allocation = true; - SpanBuffer buffer1(envoy::config::trace::v3::ZipkinConfig::hidden_envoy_deprecated_HTTP_JSON_V1, - shared); - expectSerializedBuffer(buffer1, delay_allocation, {expected1, expected2}); + SpanBuffer buffer1(envoy::config::trace::v3::ZipkinConfig::HTTP_JSON, shared); + expectSerializedBuffer(buffer1, 
delay_allocation, {expected}); - // Prepare 3 slots, since we will add one more inside the `expectSerializedBuffer` function. - SpanBuffer buffer2(envoy::config::trace::v3::ZipkinConfig::hidden_envoy_deprecated_HTTP_JSON_V1, - shared, 3); - expectSerializedBuffer(buffer2, !delay_allocation, {expected1, expected2}); + // Prepare 2 slots, since we will add one more inside the `expectSerializedBuffer` function. + // SpanBuffer + SpanBuffer buffer2(envoy::config::trace::v3::ZipkinConfig::HTTP_JSON, shared, 2); + expectSerializedBuffer(buffer2, !delay_allocation, {expected}); + + const std::string expected2 = "[{" + R"("traceId":"0000000000000001",)" + R"("id":"0000000000000001",)" + R"("kind":"CLIENT",)" + R"("timestamp":ANNOTATION_TEST_TIMESTAMP,)" + R"("duration":DEFAULT_TEST_DURATION,)" + R"("localEndpoint":{)" + R"("serviceName":"service1",)" + R"("ipv4":"1.2.3.4",)" + R"("port":8080},)" + R"("tags":{)" + R"("response_size":"DEFAULT_TEST_DURATION"}},)" + R"({)" + R"("traceId":"0000000000000001",)" + R"("id":"0000000000000001",)" + R"("kind":"SERVER",)" + R"("timestamp":ANNOTATION_TEST_TIMESTAMP,)" + R"("duration":DEFAULT_TEST_DURATION,)" + R"("localEndpoint":{)" + R"("serviceName":"service1",)" + R"("ipv4":"1.2.3.4",)" + R"("port":8080},)" + R"("tags":{)" + R"("response_size":"DEFAULT_TEST_DURATION"},)" + "}]"; + + // Test the buffer construct when `shared_span_context` is set to false + SpanBuffer buffer3(envoy::config::trace::v3::ZipkinConfig::HTTP_JSON, !shared); + expectSerializedBuffer(buffer3, delay_allocation, {expected2}); } TEST(ZipkinSpanBufferTest, SerializeSpan) { @@ -455,16 +459,15 @@ TEST(ZipkinSpanBufferTest, TestSerializeTimestampInTheFuture) { Not(HasSubstr(R"("duration":2.584324295476870e+15)"))); EXPECT_THAT(bufferDeprecatedJsonV1.serialize(), Not(HasSubstr(R"("duration":"2584324295476870")"))); +} - SpanBuffer bufferJsonV2( - envoy::config::trace::v3::ZipkinConfig::hidden_envoy_deprecated_HTTP_JSON_V1, true, 2); - 
bufferJsonV2.addSpan(createSpan({"cs"}, IpType::V4)); - EXPECT_THAT(bufferJsonV2.serialize(), HasSubstr(R"("timestamp":1584324295476871)")); - EXPECT_THAT(bufferJsonV2.serialize(), Not(HasSubstr(R"("timestamp":1.58432429547687e+15)"))); - EXPECT_THAT(bufferJsonV2.serialize(), Not(HasSubstr(R"("timestamp":"1584324295476871")"))); - EXPECT_THAT(bufferJsonV2.serialize(), HasSubstr(R"("duration":2584324295476870)")); - EXPECT_THAT(bufferJsonV2.serialize(), Not(HasSubstr(R"("duration":2.584324295476870e+15)"))); - EXPECT_THAT(bufferJsonV2.serialize(), Not(HasSubstr(R"("duration":"2584324295476870")"))); +TEST(ZipkinSpanBufferTest, TestDeprecationOfHttpJsonV1) { + EXPECT_THROW_WITH_MESSAGE( + SpanBuffer buffer1( + envoy::config::trace::v3::ZipkinConfig::hidden_envoy_deprecated_HTTP_JSON_V1, false), + Envoy::EnvoyException, + "hidden_envoy_deprecated_HTTP_JSON_V1 has been deprecated. Please use a non-default " + "envoy::config::trace::v3::ZipkinConfig::CollectorEndpointVersion value."); } } // namespace diff --git a/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc b/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc index 059fb6063a0b7..93cb4bafbfc2c 100644 --- a/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc +++ b/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc @@ -65,7 +65,7 @@ class ZipkinDriverTest : public testing::Test { std::string yaml_string = fmt::format(R"EOF( collector_cluster: fake_cluster - collector_endpoint: /api/v1/spans + collector_endpoint: /api/v2/spans collector_endpoint_version: {} )EOF", version); @@ -99,7 +99,7 @@ class ZipkinDriverTest : public testing::Test { callback = &callbacks; const std::string& expected_hostname = !hostname.empty() ? 
hostname : "fake_cluster"; - EXPECT_EQ("/api/v1/spans", message->headers().getPathValue()); + EXPECT_EQ("/api/v2/spans", message->headers().getPathValue()); EXPECT_EQ(expected_hostname, message->headers().getHostValue()); EXPECT_EQ(content_type, message->headers().getContentTypeValue()); @@ -180,7 +180,8 @@ TEST_F(ZipkinDriverTest, InitializeDriver) { // Valid config but collector cluster doesn't exists. const std::string yaml_string = R"EOF( collector_cluster: fake_cluster - collector_endpoint: /api/v1/spans + collector_endpoint: /api/v2/spans + collector_endpoint_version: HTTP_JSON )EOF"; envoy::config::trace::v3::ZipkinConfig zipkin_config; TestUtility::loadFromYaml(yaml_string, zipkin_config); @@ -193,7 +194,8 @@ TEST_F(ZipkinDriverTest, InitializeDriver) { cm_.initializeClusters({"fake_cluster"}, {}); const std::string yaml_string = R"EOF( collector_cluster: fake_cluster - collector_endpoint: /api/v1/spans + collector_endpoint: /api/v2/spans + collector_endpoint_version: HTTP_JSON )EOF"; envoy::config::trace::v3::ZipkinConfig zipkin_config; TestUtility::loadFromYaml(yaml_string, zipkin_config); @@ -208,7 +210,8 @@ TEST_F(ZipkinDriverTest, AllowCollectorClusterToBeAddedViaApi) { const std::string yaml_string = R"EOF( collector_cluster: fake_cluster - collector_endpoint: /api/v1/spans + collector_endpoint: /api/v2/spans + collector_endpoint_version: HTTP_JSON )EOF"; envoy::config::trace::v3::ZipkinConfig zipkin_config; TestUtility::loadFromYaml(yaml_string, zipkin_config); @@ -242,7 +245,7 @@ TEST_F(ZipkinDriverTest, FlushOneSpanReportFailure) { const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { callback = &callbacks; - EXPECT_EQ("/api/v1/spans", message->headers().getPathValue()); + EXPECT_EQ("/api/v2/spans", message->headers().getPathValue()); EXPECT_EQ("fake_cluster", message->headers().getHostValue()); EXPECT_EQ("application/json", message->headers().getContentTypeValue()); diff --git 
a/test/extensions/transport_sockets/proxy_protocol/proxy_protocol_test.cc b/test/extensions/transport_sockets/proxy_protocol/proxy_protocol_test.cc index 47cc4937f0f4b..17c0fafd34ee1 100644 --- a/test/extensions/transport_sockets/proxy_protocol/proxy_protocol_test.cc +++ b/test/extensions/transport_sockets/proxy_protocol/proxy_protocol_test.cc @@ -53,10 +53,10 @@ class ProxyProtocolTest : public testing::Test { // Test injects PROXY protocol header only once TEST_F(ProxyProtocolTest, InjectesHeaderOnlyOnce) { - transport_callbacks_.connection_.stream_info_.downstream_address_provider_->setLocalAddress( - Network::Utility::resolveUrl("tcp://174.2.2.222:50000")); - transport_callbacks_.connection_.stream_info_.downstream_address_provider_->setRemoteAddress( - Network::Utility::resolveUrl("tcp://172.0.0.1:80")); + transport_callbacks_.connection_.stream_info_.downstream_connection_info_provider_ + ->setLocalAddress(Network::Utility::resolveUrl("tcp://174.2.2.222:50000")); + transport_callbacks_.connection_.stream_info_.downstream_connection_info_provider_ + ->setRemoteAddress(Network::Utility::resolveUrl("tcp://172.0.0.1:80")); Buffer::OwnedImpl expected_buff{}; Common::ProxyProtocol::generateV1Header("174.2.2.222", "172.0.0.1", 50000, 80, Network::Address::IpVersion::v4, expected_buff); @@ -83,10 +83,10 @@ TEST_F(ProxyProtocolTest, InjectesHeaderOnlyOnce) { // Test returned bytes processed includes the PROXY protocol header TEST_F(ProxyProtocolTest, BytesProcessedIncludesProxyProtocolHeader) { - transport_callbacks_.connection_.stream_info_.downstream_address_provider_->setLocalAddress( - Network::Utility::resolveUrl("tcp://174.2.2.222:50000")); - transport_callbacks_.connection_.stream_info_.downstream_address_provider_->setRemoteAddress( - Network::Utility::resolveUrl("tcp://172.0.0.1:80")); + transport_callbacks_.connection_.stream_info_.downstream_connection_info_provider_ + ->setLocalAddress(Network::Utility::resolveUrl("tcp://174.2.2.222:50000")); + 
transport_callbacks_.connection_.stream_info_.downstream_connection_info_provider_ + ->setRemoteAddress(Network::Utility::resolveUrl("tcp://172.0.0.1:80")); Buffer::OwnedImpl expected_buff{}; Common::ProxyProtocol::generateV1Header("174.2.2.222", "172.0.0.1", 50000, 80, Network::Address::IpVersion::v4, expected_buff); @@ -116,10 +116,10 @@ TEST_F(ProxyProtocolTest, BytesProcessedIncludesProxyProtocolHeader) { // Test returns KeepOpen action when write error is Again TEST_F(ProxyProtocolTest, ReturnsKeepOpenWhenWriteErrorIsAgain) { - transport_callbacks_.connection_.stream_info_.downstream_address_provider_->setLocalAddress( - Network::Utility::resolveUrl("tcp://174.2.2.222:50000")); - transport_callbacks_.connection_.stream_info_.downstream_address_provider_->setRemoteAddress( - Network::Utility::resolveUrl("tcp://172.0.0.1:80")); + transport_callbacks_.connection_.stream_info_.downstream_connection_info_provider_ + ->setLocalAddress(Network::Utility::resolveUrl("tcp://174.2.2.222:50000")); + transport_callbacks_.connection_.stream_info_.downstream_connection_info_provider_ + ->setRemoteAddress(Network::Utility::resolveUrl("tcp://172.0.0.1:80")); Buffer::OwnedImpl expected_buff{}; Common::ProxyProtocol::generateV1Header("174.2.2.222", "172.0.0.1", 50000, 80, Network::Address::IpVersion::v4, expected_buff); @@ -152,10 +152,10 @@ TEST_F(ProxyProtocolTest, ReturnsKeepOpenWhenWriteErrorIsAgain) { // Test returns Close action when write error is not Again TEST_F(ProxyProtocolTest, ReturnsCloseWhenWriteErrorIsNotAgain) { - transport_callbacks_.connection_.stream_info_.downstream_address_provider_->setLocalAddress( - Network::Utility::resolveUrl("tcp://174.2.2.222:50000")); - transport_callbacks_.connection_.stream_info_.downstream_address_provider_->setRemoteAddress( - Network::Utility::resolveUrl("tcp://172.0.0.1:80")); + transport_callbacks_.connection_.stream_info_.downstream_connection_info_provider_ + 
->setLocalAddress(Network::Utility::resolveUrl("tcp://174.2.2.222:50000")); + transport_callbacks_.connection_.stream_info_.downstream_connection_info_provider_ + ->setRemoteAddress(Network::Utility::resolveUrl("tcp://172.0.0.1:80")); Buffer::OwnedImpl expected_buff{}; Common::ProxyProtocol::generateV1Header("174.2.2.222", "172.0.0.1", 50000, 80, Network::Address::IpVersion::v4, expected_buff); @@ -178,10 +178,10 @@ TEST_F(ProxyProtocolTest, ReturnsCloseWhenWriteErrorIsNotAgain) { // Test injects V1 PROXY protocol using upstream addresses when transport options are null TEST_F(ProxyProtocolTest, V1IPV4LocalAddressWhenTransportOptionsAreNull) { - transport_callbacks_.connection_.stream_info_.downstream_address_provider_->setLocalAddress( - Network::Utility::resolveUrl("tcp://174.2.2.222:50000")); - transport_callbacks_.connection_.stream_info_.downstream_address_provider_->setRemoteAddress( - Network::Utility::resolveUrl("tcp://172.0.0.1:80")); + transport_callbacks_.connection_.stream_info_.downstream_connection_info_provider_ + ->setLocalAddress(Network::Utility::resolveUrl("tcp://174.2.2.222:50000")); + transport_callbacks_.connection_.stream_info_.downstream_connection_info_provider_ + ->setRemoteAddress(Network::Utility::resolveUrl("tcp://172.0.0.1:80")); Buffer::OwnedImpl expected_buff{}; Common::ProxyProtocol::generateV1Header("174.2.2.222", "172.0.0.1", 50000, 80, Network::Address::IpVersion::v4, expected_buff); @@ -201,10 +201,10 @@ TEST_F(ProxyProtocolTest, V1IPV4LocalAddressWhenTransportOptionsAreNull) { // Test injects V1 PROXY protocol using upstream addresses when header options are null TEST_F(ProxyProtocolTest, V1IPV4LocalAddressesWhenHeaderOptionsAreNull) { - transport_callbacks_.connection_.stream_info_.downstream_address_provider_->setLocalAddress( - Network::Utility::resolveUrl("tcp://174.2.2.222:50000")); - transport_callbacks_.connection_.stream_info_.downstream_address_provider_->setRemoteAddress( - 
Network::Utility::resolveUrl("tcp://172.0.0.1:80")); + transport_callbacks_.connection_.stream_info_.downstream_connection_info_provider_ + ->setLocalAddress(Network::Utility::resolveUrl("tcp://174.2.2.222:50000")); + transport_callbacks_.connection_.stream_info_.downstream_connection_info_provider_ + ->setRemoteAddress(Network::Utility::resolveUrl("tcp://172.0.0.1:80")); Buffer::OwnedImpl expected_buff{}; Common::ProxyProtocol::generateV1Header("174.2.2.222", "172.0.0.1", 50000, 80, Network::Address::IpVersion::v4, expected_buff); @@ -225,10 +225,10 @@ TEST_F(ProxyProtocolTest, V1IPV4LocalAddressesWhenHeaderOptionsAreNull) { // Test injects V1 PROXY protocol using upstream addresses when header options are null TEST_F(ProxyProtocolTest, V1IPV6LocalAddressesWhenHeaderOptionsAreNull) { - transport_callbacks_.connection_.stream_info_.downstream_address_provider_->setLocalAddress( - Network::Utility::resolveUrl("tcp://[a:b:c:d::]:50000")); - transport_callbacks_.connection_.stream_info_.downstream_address_provider_->setRemoteAddress( - Network::Utility::resolveUrl("tcp://[e:b:c:f::]:8080")); + transport_callbacks_.connection_.stream_info_.downstream_connection_info_provider_ + ->setLocalAddress(Network::Utility::resolveUrl("tcp://[a:b:c:d::]:50000")); + transport_callbacks_.connection_.stream_info_.downstream_connection_info_provider_ + ->setRemoteAddress(Network::Utility::resolveUrl("tcp://[e:b:c:f::]:8080")); Buffer::OwnedImpl expected_buff{}; Common::ProxyProtocol::generateV1Header("a:b:c:d::", "e:b:c:f::", 50000, 8080, Network::Address::IpVersion::v6, expected_buff); @@ -258,10 +258,10 @@ TEST_F(ProxyProtocolTest, V1IPV4DownstreamAddresses) { "", std::vector{}, std::vector{}, std::vector{}, absl::optional( Network::ProxyProtocolData{src_addr, dst_addr})); - transport_callbacks_.connection_.stream_info_.downstream_address_provider_->setLocalAddress( - Network::Utility::resolveUrl("tcp://174.2.2.222:50000")); - 
transport_callbacks_.connection_.stream_info_.downstream_address_provider_->setRemoteAddress( - Network::Utility::resolveUrl("tcp://172.0.0.1:8080")); + transport_callbacks_.connection_.stream_info_.downstream_connection_info_provider_ + ->setLocalAddress(Network::Utility::resolveUrl("tcp://174.2.2.222:50000")); + transport_callbacks_.connection_.stream_info_.downstream_connection_info_provider_ + ->setRemoteAddress(Network::Utility::resolveUrl("tcp://172.0.0.1:8080")); Buffer::OwnedImpl expected_buff{}; Common::ProxyProtocol::generateV1Header("202.168.0.13", "174.2.2.222", 52000, 80, Network::Address::IpVersion::v4, expected_buff); @@ -290,10 +290,10 @@ TEST_F(ProxyProtocolTest, V1IPV6DownstreamAddresses) { "", std::vector{}, std::vector{}, std::vector{}, absl::optional( Network::ProxyProtocolData{src_addr, dst_addr})); - transport_callbacks_.connection_.stream_info_.downstream_address_provider_->setLocalAddress( - Network::Utility::resolveUrl("tcp://[a:b:c:d::]:50000")); - transport_callbacks_.connection_.stream_info_.downstream_address_provider_->setRemoteAddress( - Network::Utility::resolveUrl("tcp://[e:b:c:f::]:8080")); + transport_callbacks_.connection_.stream_info_.downstream_connection_info_provider_ + ->setLocalAddress(Network::Utility::resolveUrl("tcp://[a:b:c:d::]:50000")); + transport_callbacks_.connection_.stream_info_.downstream_connection_info_provider_ + ->setRemoteAddress(Network::Utility::resolveUrl("tcp://[e:b:c:f::]:8080")); Buffer::OwnedImpl expected_buff{}; Common::ProxyProtocol::generateV1Header("1::2:3", "a:b:c:d::", 52000, 80, Network::Address::IpVersion::v6, expected_buff); @@ -313,10 +313,10 @@ TEST_F(ProxyProtocolTest, V1IPV6DownstreamAddresses) { // Test injects V2 PROXY protocol using upstream addresses when transport options are null TEST_F(ProxyProtocolTest, V2IPV4LocalCommandWhenTransportOptionsAreNull) { - transport_callbacks_.connection_.stream_info_.downstream_address_provider_->setLocalAddress( - 
Network::Utility::resolveUrl("tcp://1.2.3.4:773")); - transport_callbacks_.connection_.stream_info_.downstream_address_provider_->setRemoteAddress( - Network::Utility::resolveUrl("tcp://0.1.1.2:513")); + transport_callbacks_.connection_.stream_info_.downstream_connection_info_provider_ + ->setLocalAddress(Network::Utility::resolveUrl("tcp://1.2.3.4:773")); + transport_callbacks_.connection_.stream_info_.downstream_connection_info_provider_ + ->setRemoteAddress(Network::Utility::resolveUrl("tcp://0.1.1.2:513")); Buffer::OwnedImpl expected_buff{}; Common::ProxyProtocol::generateV2LocalHeader(expected_buff); initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V2, nullptr); @@ -335,10 +335,10 @@ TEST_F(ProxyProtocolTest, V2IPV4LocalCommandWhenTransportOptionsAreNull) { // Test injects V2 PROXY protocol using upstream addresses when header options are null TEST_F(ProxyProtocolTest, V2IPV4LocalCommandWhenHeaderOptionsAreNull) { - transport_callbacks_.connection_.stream_info_.downstream_address_provider_->setLocalAddress( - Network::Utility::resolveUrl("tcp://1.2.3.4:773")); - transport_callbacks_.connection_.stream_info_.downstream_address_provider_->setRemoteAddress( - Network::Utility::resolveUrl("tcp://0.1.1.2:513")); + transport_callbacks_.connection_.stream_info_.downstream_connection_info_provider_ + ->setLocalAddress(Network::Utility::resolveUrl("tcp://1.2.3.4:773")); + transport_callbacks_.connection_.stream_info_.downstream_connection_info_provider_ + ->setRemoteAddress(Network::Utility::resolveUrl("tcp://0.1.1.2:513")); Buffer::OwnedImpl expected_buff{}; Common::ProxyProtocol::generateV2LocalHeader(expected_buff); initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V2, @@ -367,10 +367,10 @@ TEST_F(ProxyProtocolTest, V2IPV4DownstreamAddresses) { "", std::vector{}, std::vector{}, std::vector{}, absl::optional( Network::ProxyProtocolData{src_addr, dst_addr})); - 
transport_callbacks_.connection_.stream_info_.downstream_address_provider_->setLocalAddress( - Network::Utility::resolveUrl("tcp://0.1.1.2:50000")); - transport_callbacks_.connection_.stream_info_.downstream_address_provider_->setRemoteAddress( - Network::Utility::resolveUrl("tcp://3.3.3.3:80")); + transport_callbacks_.connection_.stream_info_.downstream_connection_info_provider_ + ->setLocalAddress(Network::Utility::resolveUrl("tcp://0.1.1.2:50000")); + transport_callbacks_.connection_.stream_info_.downstream_connection_info_provider_ + ->setRemoteAddress(Network::Utility::resolveUrl("tcp://3.3.3.3:80")); Buffer::OwnedImpl expected_buff{}; Common::ProxyProtocol::generateV2Header("1.2.3.4", "0.1.1.2", 773, 513, Network::Address::IpVersion::v4, expected_buff); @@ -399,10 +399,10 @@ TEST_F(ProxyProtocolTest, V2IPV6DownstreamAddresses) { "", std::vector{}, std::vector{}, std::vector{}, absl::optional( Network::ProxyProtocolData{src_addr, dst_addr})); - transport_callbacks_.connection_.stream_info_.downstream_address_provider_->setLocalAddress( - Network::Utility::resolveUrl("tcp://[1:100:200:3::]:50000")); - transport_callbacks_.connection_.stream_info_.downstream_address_provider_->setRemoteAddress( - Network::Utility::resolveUrl("tcp://[e:b:c:f::]:8080")); + transport_callbacks_.connection_.stream_info_.downstream_connection_info_provider_ + ->setLocalAddress(Network::Utility::resolveUrl("tcp://[1:100:200:3::]:50000")); + transport_callbacks_.connection_.stream_info_.downstream_connection_info_provider_ + ->setRemoteAddress(Network::Utility::resolveUrl("tcp://[e:b:c:f::]:8080")); Buffer::OwnedImpl expected_buff{}; Common::ProxyProtocol::generateV2Header("1:2:3::4", "1:100:200:3::", 8, 2, Network::Address::IpVersion::v6, expected_buff); @@ -431,10 +431,10 @@ TEST_F(ProxyProtocolTest, OnConnectedCallsInnerOnConnected) { "", std::vector{}, std::vector{}, std::vector{}, absl::optional( Network::ProxyProtocolData{src_addr, dst_addr})); - 
transport_callbacks_.connection_.stream_info_.downstream_address_provider_->setLocalAddress( - Network::Utility::resolveUrl("tcp://[1:100:200:3::]:50000")); - transport_callbacks_.connection_.stream_info_.downstream_address_provider_->setRemoteAddress( - Network::Utility::resolveUrl("tcp://[e:b:c:f::]:8080")); + transport_callbacks_.connection_.stream_info_.downstream_connection_info_provider_ + ->setLocalAddress(Network::Utility::resolveUrl("tcp://[1:100:200:3::]:50000")); + transport_callbacks_.connection_.stream_info_.downstream_connection_info_provider_ + ->setRemoteAddress(Network::Utility::resolveUrl("tcp://[e:b:c:f::]:8080")); initialize(ProxyProtocolConfig_Version::ProxyProtocolConfig_Version_V2, socket_options); EXPECT_CALL(*inner_socket_, onConnected()); diff --git a/test/extensions/transport_sockets/tap/tap_config_impl_test.cc b/test/extensions/transport_sockets/tap/tap_config_impl_test.cc index 3ed31cd6c2d62..c60e8d898dfc5 100644 --- a/test/extensions/transport_sockets/tap/tap_config_impl_test.cc +++ b/test/extensions/transport_sockets/tap/tap_config_impl_test.cc @@ -47,7 +47,7 @@ class MockSocketTapConfig : public SocketTapConfig { class PerSocketTapperImplTest : public testing::Test { public: void setup(bool streaming) { - connection_.stream_info_.downstream_address_provider_->setLocalAddress( + connection_.stream_info_.downstream_connection_info_provider_->setLocalAddress( std::make_shared("127.0.0.1", 1000)); ON_CALL(connection_, id()).WillByDefault(Return(1)); EXPECT_CALL(*config_, createPerTapSinkHandleManager_(1)).WillOnce(Return(sink_manager_)); diff --git a/test/extensions/transport_sockets/tls/cert_validator/default_validator_test.cc b/test/extensions/transport_sockets/tls/cert_validator/default_validator_test.cc index fee175872bd65..68925f05930dd 100644 --- a/test/extensions/transport_sockets/tls/cert_validator/default_validator_test.cc +++ b/test/extensions/transport_sockets/tls/cert_validator/default_validator_test.cc @@ -43,7 +43,8 @@ 
TEST(DefaultCertValidatorTest, TestMatchSubjectAltNameDNSMatched) { "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem")); envoy::type::matcher::v3::StringMatcher matcher; matcher.MergeFrom(TestUtility::createRegexMatcher(".*.example.com")); - std::vector subject_alt_name_matchers; + std::vector> + subject_alt_name_matchers; subject_alt_name_matchers.push_back(Matchers::StringMatcherImpl(matcher)); EXPECT_TRUE(DefaultCertValidator::matchSubjectAltName(cert.get(), subject_alt_name_matchers)); } @@ -54,7 +55,8 @@ TEST(DefaultCertValidatorTest, TestMatchSubjectAltNameWildcardDNSMatched) { "}}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_cert.pem")); envoy::type::matcher::v3::StringMatcher matcher; matcher.set_exact("api.example.com"); - std::vector subject_alt_name_matchers; + std::vector> + subject_alt_name_matchers; subject_alt_name_matchers.push_back(Matchers::StringMatcherImpl(matcher)); EXPECT_TRUE(DefaultCertValidator::matchSubjectAltName(cert.get(), subject_alt_name_matchers)); } @@ -66,7 +68,8 @@ TEST(DefaultCertValidatorTest, TestMultiLevelMatch) { "}}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_cert.pem")); envoy::type::matcher::v3::StringMatcher matcher; matcher.set_exact("foo.api.example.com"); - std::vector subject_alt_name_matchers; + std::vector> + subject_alt_name_matchers; subject_alt_name_matchers.push_back(Matchers::StringMatcherImpl(matcher)); EXPECT_FALSE(DefaultCertValidator::matchSubjectAltName(cert.get(), subject_alt_name_matchers)); } @@ -93,7 +96,8 @@ TEST(DefaultCertValidatorTest, TestMatchSubjectAltNameURIMatched) { "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem")); envoy::type::matcher::v3::StringMatcher matcher; matcher.MergeFrom(TestUtility::createRegexMatcher("spiffe://lyft.com/.*-team")); - std::vector subject_alt_name_matchers; + std::vector> + subject_alt_name_matchers; 
subject_alt_name_matchers.push_back(Matchers::StringMatcherImpl(matcher)); EXPECT_TRUE(DefaultCertValidator::matchSubjectAltName(cert.get(), subject_alt_name_matchers)); } @@ -111,7 +115,8 @@ TEST(DefaultCertValidatorTest, TestMatchSubjectAltNameNotMatched) { "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem")); envoy::type::matcher::v3::StringMatcher matcher; matcher.MergeFrom(TestUtility::createRegexMatcher(".*.foo.com")); - std::vector subject_alt_name_matchers; + std::vector> + subject_alt_name_matchers; subject_alt_name_matchers.push_back(Matchers::StringMatcherImpl(matcher)); EXPECT_FALSE(DefaultCertValidator::matchSubjectAltName(cert.get(), subject_alt_name_matchers)); } @@ -129,7 +134,7 @@ TEST(DefaultCertValidatorTest, TestCertificateVerificationWithSANMatcher) { "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem")); envoy::type::matcher::v3::StringMatcher matcher; matcher.MergeFrom(TestUtility::createRegexMatcher(".*.example.com")); - std::vector san_matchers; + std::vector> san_matchers; san_matchers.push_back(Matchers::StringMatcherImpl(matcher)); // Verify the certificate with correct SAN regex matcher. EXPECT_EQ(default_validator->verifyCertificate(cert.get(), /*verify_san_list=*/{}, san_matchers), @@ -137,7 +142,8 @@ TEST(DefaultCertValidatorTest, TestCertificateVerificationWithSANMatcher) { EXPECT_EQ(stats.fail_verify_san_.value(), 0); matcher.MergeFrom(TestUtility::createExactMatcher("hello.example.com")); - std::vector invalid_san_matchers; + std::vector> + invalid_san_matchers; invalid_san_matchers.push_back(Matchers::StringMatcherImpl(matcher)); // Verify the certificate with incorrect SAN exact matcher. 
EXPECT_EQ(default_validator->verifyCertificate(cert.get(), /*verify_san_list=*/{}, @@ -158,9 +164,10 @@ TEST(DefaultCertValidatorTest, TestCertificateVerificationWithNoValidationContex EXPECT_EQ(default_validator->verifyCertificate(/*cert=*/nullptr, /*verify_san_list=*/{}, /*subject_alt_name_matchers=*/{}), Envoy::Ssl::ClientValidationStatus::NotValidated); - X509 cert = {}; + bssl::UniquePtr cert(X509_new()); EXPECT_EQ(default_validator->doVerifyCertChain(/*store_ctx=*/nullptr, - /*ssl_extended_info=*/nullptr, /*leaf_cert=*/cert, + /*ssl_extended_info=*/nullptr, + /*leaf_cert=*/*cert, /*transport_socket_options=*/nullptr), 0); } diff --git a/test/extensions/transport_sockets/tls/context_impl_test.cc b/test/extensions/transport_sockets/tls/context_impl_test.cc index 72bff36b463bd..5e2de45ee3819 100644 --- a/test/extensions/transport_sockets/tls/context_impl_test.cc +++ b/test/extensions/transport_sockets/tls/context_impl_test.cc @@ -623,23 +623,6 @@ TEST_F(SslServerContextImplOcspTest, TestMustStapleCertWithoutStapleConfigFails) "OCSP response is required for must-staple certificate"); } -TEST_F(SslServerContextImplOcspTest, TestMustStapleCertWithoutStapleFeatureFlagOff) { - const std::string tls_context_yaml = R"EOF( - common_tls_context: - tls_certificates: - - certificate_chain: - filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/ocsp/test_data/revoked_cert.pem" - private_key: - filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/ocsp/test_data/revoked_key.pem" - ocsp_staple_policy: lenient_stapling - )EOF"; - - TestScopedRuntime scoped_runtime; - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.require_ocsp_response_for_must_staple_certs", "false"}}); - loadConfigYaml(tls_context_yaml); -} - TEST_F(SslServerContextImplOcspTest, TestGetCertInformationWithOCSP) { const std::string yaml = R"EOF( common_tls_context: @@ -716,10 +699,9 @@ class SslServerContextImplTicketTest : public 
SslContextImplTest { loadConfig(server_context_config); } - void loadConfigYaml(const std::string& yaml, bool avoid_boosting = true) { + void loadConfigYaml(const std::string& yaml) { envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context; - TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), tls_context, false, - avoid_boosting); + TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), tls_context); ServerContextConfigImpl cfg(tls_context, factory_context_); loadConfig(cfg); } @@ -1696,7 +1678,7 @@ TEST_F(ServerContextConfigImplTest, PrivateKeyMethodLoadFailureNoProvider) { TestUtility::loadFromYaml(TestEnvironment::substitute(tls_context_yaml), tls_context); EXPECT_THROW_WITH_REGEX( ServerContextConfigImpl server_context_config(tls_context, factory_context_), EnvoyException, - "Failed to load incomplete certificate from "); + "Failed to load private key provider: mock_provider"); } TEST_F(ServerContextConfigImplTest, PrivateKeyMethodLoadFailureNoMethod) { @@ -1767,11 +1749,6 @@ TEST_F(ServerContextConfigImplTest, PrivateKeyMethodLoadFailureBothKeyAndMethod) NiceMock private_key_method_manager; auto private_key_method_provider_ptr = std::make_shared>(); - EXPECT_CALL(factory_context_, sslContextManager()).WillOnce(ReturnRef(context_manager)); - EXPECT_CALL(context_manager, privateKeyMethodManager()) - .WillOnce(ReturnRef(private_key_method_manager)); - EXPECT_CALL(private_key_method_manager, createPrivateKeyMethodProvider(_, _)) - .WillOnce(Return(private_key_method_provider_ptr)); const std::string tls_context_yaml = R"EOF( common_tls_context: tls_certificates: diff --git a/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc b/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc index 02d5e1240a025..0e1b6d9f14ffe 100644 --- a/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc +++ b/test/extensions/transport_sockets/tls/integration/ssl_integration_test.cc @@ 
-665,10 +665,12 @@ TEST_P(SslTapIntegrationTest, TwoRequestsWithBinaryProto) { checkStats(); envoy::config::core::v3::Address expected_local_address; Network::Utility::addressToProtobufAddress( - *codec_client_->connection()->addressProvider().remoteAddress(), expected_local_address); + *codec_client_->connection()->connectionInfoProvider().remoteAddress(), + expected_local_address); envoy::config::core::v3::Address expected_remote_address; Network::Utility::addressToProtobufAddress( - *codec_client_->connection()->addressProvider().localAddress(), expected_remote_address); + *codec_client_->connection()->connectionInfoProvider().localAddress(), + expected_remote_address); codec_client_->close(); test_server_->waitForCounterGe("http.config_test.downstream_cx_destroy", 1); envoy::data::tap::v3::TraceWrapper trace; diff --git a/test/extensions/transport_sockets/tls/io_handle_bio_test.cc b/test/extensions/transport_sockets/tls/io_handle_bio_test.cc index 155e8c0b07086..36a2625314a26 100644 --- a/test/extensions/transport_sockets/tls/io_handle_bio_test.cc +++ b/test/extensions/transport_sockets/tls/io_handle_bio_test.cc @@ -7,6 +7,7 @@ #include "gtest/gtest.h" #include "openssl/ssl.h" +using testing::_; using testing::NiceMock; using testing::Return; @@ -24,6 +25,17 @@ class IoHandleBioTest : public testing::Test { NiceMock io_handle_; }; +TEST_F(IoHandleBioTest, WriteError) { + EXPECT_CALL(io_handle_, writev(_, 1)) + .WillOnce(Return(testing::ByMove( + Api::IoCallUint64Result(0, Api::IoErrorPtr(new Network::IoSocketError(100), + Network::IoSocketError::deleteIoError))))); + EXPECT_EQ(-1, bio_->method->bwrite(bio_, nullptr, 10)); + const int err = ERR_get_error(); + EXPECT_EQ(ERR_GET_LIB(err), ERR_LIB_SYS); + EXPECT_EQ(ERR_GET_REASON(err), 100); +} + TEST_F(IoHandleBioTest, TestMiscApis) { EXPECT_EQ(bio_->method->destroy(nullptr), 0); EXPECT_EQ(bio_->method->bread(nullptr, nullptr, 0), 0); diff --git a/test/extensions/transport_sockets/tls/ssl_socket_test.cc 
b/test/extensions/transport_sockets/tls/ssl_socket_test.cc index 0e501606c5658..db8cd6b6cec52 100644 --- a/test/extensions/transport_sockets/tls/ssl_socket_test.cc +++ b/test/extensions/transport_sockets/tls/ssl_socket_test.cc @@ -256,6 +256,22 @@ class TestUtilOptions : public TestUtilOptionsBase { bool ocspStaplingEnabled() const { return ocsp_stapling_enabled_; } + TestUtilOptions& setExpectedTransportFailureReasonContains( + const std::string& expected_transport_failure_reason_contains) { + expected_transport_failure_reason_contains_ = expected_transport_failure_reason_contains; + return *this; + } + + const std::string& expectedTransportFailureReasonContains() const { + return expected_transport_failure_reason_contains_; + } + + TestUtilOptions& setNotExpectedClientStats(const std::string& stat) { + not_expected_client_stats_ = stat; + return *this; + } + const std::string& notExpectedClientStats() const { return not_expected_client_stats_; } + private: const std::string client_ctx_yaml_; const std::string server_ctx_yaml_; @@ -277,6 +293,8 @@ class TestUtilOptions : public TestUtilOptionsBase { std::string expected_expiration_peer_cert_; std::string expected_ocsp_response_; bool ocsp_stapling_enabled_{false}; + std::string expected_transport_failure_reason_contains_; + std::string not_expected_client_stats_; }; void testUtil(const TestUtilOptions& options) { @@ -333,7 +351,7 @@ void testUtil(const TestUtilOptions& options) { ClientSslSocketFactory client_ssl_socket_factory(std::move(client_cfg), manager, client_stats_store); Network::ClientConnectionPtr client_connection = dispatcher->createClientConnection( - socket->addressProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), + socket->connectionInfoProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), client_ssl_socket_factory.createTransportSocket(nullptr), nullptr); Network::ConnectionPtr server_connection; Network::MockConnectionCallbacks server_connection_callbacks; @@ 
-482,6 +500,19 @@ void testUtil(const TestUtilOptions& options) { if (!options.expectedServerStats().empty()) { EXPECT_EQ(1UL, server_stats_store.counter(options.expectedServerStats()).value()); } + + if (!options.notExpectedClientStats().empty()) { + EXPECT_EQ(0, client_stats_store.counter(options.notExpectedClientStats()).value()); + } + + if (options.expectSuccess()) { + EXPECT_EQ("", client_connection->transportFailureReason()); + EXPECT_EQ("", server_connection->transportFailureReason()); + } else { + EXPECT_THAT(std::string(client_connection->transportFailureReason()), + ContainsRegex(options.expectedTransportFailureReasonContains())); + EXPECT_NE("", server_connection->transportFailureReason()); + } } /** @@ -645,7 +676,7 @@ void testUtilV2(const TestUtilOptionsV2& options) { ClientSslSocketFactory client_ssl_socket_factory(std::move(client_cfg), manager, client_stats_store); Network::ClientConnectionPtr client_connection = dispatcher->createClientConnection( - socket->addressProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), + socket->connectionInfoProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), client_ssl_socket_factory.createTransportSocket(options.transportSocketOptions()), nullptr); if (!options.clientSession().empty()) { @@ -2423,7 +2454,7 @@ TEST_P(SslSocketTest, FlushCloseDuringHandshake) { Network::ListenerPtr listener = dispatcher_->createListener(socket, callbacks, true); Network::ClientConnectionPtr client_connection = dispatcher_->createClientConnection( - socket->addressProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), + socket->connectionInfoProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), Network::Test::createRawBufferSocket(), nullptr); client_connection->connect(); Network::MockConnectionCallbacks client_connection_callbacks; @@ -2490,7 +2521,7 @@ TEST_P(SslSocketTest, HalfClose) { ClientSslSocketFactory client_ssl_socket_factory(std::move(client_cfg), 
manager, client_stats_store); Network::ClientConnectionPtr client_connection = dispatcher_->createClientConnection( - socket->addressProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), + socket->connectionInfoProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), client_ssl_socket_factory.createTransportSocket(nullptr), nullptr); client_connection->enableHalfClose(true); client_connection->addReadFilter(client_read_filter); @@ -2571,7 +2602,7 @@ TEST_P(SslSocketTest, ShutdownWithCloseNotify) { ClientSslSocketFactory client_ssl_socket_factory(std::move(client_cfg), manager, client_stats_store); Network::ClientConnectionPtr client_connection = dispatcher_->createClientConnection( - socket->addressProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), + socket->connectionInfoProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), client_ssl_socket_factory.createTransportSocket(nullptr), nullptr); Network::MockConnectionCallbacks client_connection_callbacks; client_connection->enableHalfClose(true); @@ -2658,7 +2689,7 @@ TEST_P(SslSocketTest, ShutdownWithoutCloseNotify) { ClientSslSocketFactory client_ssl_socket_factory(std::move(client_cfg), manager, client_stats_store); Network::ClientConnectionPtr client_connection = dispatcher_->createClientConnection( - socket->addressProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), + socket->connectionInfoProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), client_ssl_socket_factory.createTransportSocket(nullptr), nullptr); Network::MockConnectionCallbacks client_connection_callbacks; client_connection->enableHalfClose(true); @@ -2763,7 +2794,7 @@ TEST_P(SslSocketTest, ClientAuthMultipleCAs) { Stats::TestUtil::TestStore client_stats_store; ClientSslSocketFactory ssl_socket_factory(std::move(client_cfg), manager, client_stats_store); Network::ClientConnectionPtr client_connection = dispatcher_->createClientConnection( - 
socket->addressProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), + socket->connectionInfoProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), ssl_socket_factory.createTransportSocket(nullptr), nullptr); // Verify that server sent list with 2 acceptable client certificate CA names. @@ -2859,7 +2890,7 @@ void testTicketSessionResumption(const std::string& server_ctx_yaml1, std::make_unique(client_tls_context, client_factory_context); ClientSslSocketFactory ssl_socket_factory(std::move(client_cfg), manager, client_stats_store); Network::ClientConnectionPtr client_connection = dispatcher->createClientConnection( - socket1->addressProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), + socket1->connectionInfoProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), ssl_socket_factory.createTransportSocket(nullptr), nullptr); Network::MockConnectionCallbacks client_connection_callbacks; @@ -2872,7 +2903,8 @@ void testTicketSessionResumption(const std::string& server_ctx_yaml1, EXPECT_CALL(callbacks, onAccept_(_)) .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void { Network::TransportSocketFactory& tsf = - socket->addressProvider().localAddress() == socket1->addressProvider().localAddress() + socket->connectionInfoProvider().localAddress() == + socket1->connectionInfoProvider().localAddress() ? 
server_ssl_socket_factory1 : server_ssl_socket_factory2; server_connection = dispatcher->createServerConnection( @@ -2901,7 +2933,7 @@ void testTicketSessionResumption(const std::string& server_ctx_yaml1, EXPECT_EQ(0UL, client_stats_store.counter("ssl.session_reused").value()); client_connection = dispatcher->createClientConnection( - socket2->addressProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), + socket2->connectionInfoProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), ssl_socket_factory.createTransportSocket(nullptr), nullptr); client_connection->addConnectionCallbacks(client_connection_callbacks); const SslHandshakerImpl* ssl_socket = @@ -2916,7 +2948,8 @@ void testTicketSessionResumption(const std::string& server_ctx_yaml1, EXPECT_CALL(callbacks, onAccept_(_)) .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void { Network::TransportSocketFactory& tsf = - socket->addressProvider().localAddress() == socket1->addressProvider().localAddress() + socket->connectionInfoProvider().localAddress() == + socket1->connectionInfoProvider().localAddress() ? 
server_ssl_socket_factory1 : server_ssl_socket_factory2; server_connection = dispatcher->createServerConnection( @@ -2995,8 +3028,9 @@ void testSupportForStatelessSessionResumption(const std::string& server_ctx_yaml std::make_unique(client_tls_context, client_factory_context); ClientSslSocketFactory ssl_socket_factory(std::move(client_cfg), manager, client_stats_store); Network::ClientConnectionPtr client_connection = dispatcher->createClientConnection( - tcp_socket->addressProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), - ssl_socket_factory.createTransportSocket(nullptr), nullptr); + tcp_socket->connectionInfoProvider().localAddress(), + Network::Address::InstanceConstSharedPtr(), ssl_socket_factory.createTransportSocket(nullptr), + nullptr); Network::MockConnectionCallbacks client_connection_callbacks; client_connection->addConnectionCallbacks(client_connection_callbacks); @@ -3439,7 +3473,7 @@ TEST_P(SslSocketTest, ClientAuthCrossListenerSessionResumption) { Stats::TestUtil::TestStore client_stats_store; ClientSslSocketFactory ssl_socket_factory(std::move(client_cfg), manager, client_stats_store); Network::ClientConnectionPtr client_connection = dispatcher_->createClientConnection( - socket->addressProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), + socket->connectionInfoProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), ssl_socket_factory.createTransportSocket(nullptr), nullptr); Network::MockConnectionCallbacks client_connection_callbacks; @@ -3451,10 +3485,11 @@ TEST_P(SslSocketTest, ClientAuthCrossListenerSessionResumption) { Network::MockConnectionCallbacks server_connection_callbacks; EXPECT_CALL(callbacks, onAccept_(_)) .WillOnce(Invoke([&](Network::ConnectionSocketPtr& accepted_socket) -> void { - Network::TransportSocketFactory& tsf = accepted_socket->addressProvider().localAddress() == - socket->addressProvider().localAddress() - ? 
server_ssl_socket_factory - : server2_ssl_socket_factory; + Network::TransportSocketFactory& tsf = + accepted_socket->connectionInfoProvider().localAddress() == + socket->connectionInfoProvider().localAddress() + ? server_ssl_socket_factory + : server2_ssl_socket_factory; server_connection = dispatcher_->createServerConnection( std::move(accepted_socket), tsf.createTransportSocket(nullptr), stream_info_); server_connection->addConnectionCallbacks(server_connection_callbacks); @@ -3480,7 +3515,7 @@ TEST_P(SslSocketTest, ClientAuthCrossListenerSessionResumption) { EXPECT_EQ(1UL, client_stats_store.counter("ssl.handshake").value()); client_connection = dispatcher_->createClientConnection( - socket2->addressProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), + socket2->connectionInfoProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), ssl_socket_factory.createTransportSocket(nullptr), nullptr); client_connection->addConnectionCallbacks(client_connection_callbacks); const SslHandshakerImpl* ssl_socket = @@ -3492,10 +3527,11 @@ TEST_P(SslSocketTest, ClientAuthCrossListenerSessionResumption) { EXPECT_CALL(callbacks, onAccept_(_)) .WillOnce(Invoke([&](Network::ConnectionSocketPtr& accepted_socket) -> void { - Network::TransportSocketFactory& tsf = accepted_socket->addressProvider().localAddress() == - socket->addressProvider().localAddress() - ? server_ssl_socket_factory - : server2_ssl_socket_factory; + Network::TransportSocketFactory& tsf = + accepted_socket->connectionInfoProvider().localAddress() == + socket->connectionInfoProvider().localAddress() + ? 
server_ssl_socket_factory + : server2_ssl_socket_factory; server_connection = dispatcher_->createServerConnection( std::move(accepted_socket), tsf.createTransportSocket(nullptr), stream_info_); server_connection->addConnectionCallbacks(server_connection_callbacks); @@ -3556,7 +3592,7 @@ void SslSocketTest::testClientSessionResumption(const std::string& server_ctx_ya ClientSslSocketFactory client_ssl_socket_factory(std::move(client_cfg), manager, client_stats_store); Network::ClientConnectionPtr client_connection = dispatcher->createClientConnection( - socket->addressProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), + socket->connectionInfoProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), client_ssl_socket_factory.createTransportSocket(nullptr), nullptr); Network::MockConnectionCallbacks client_connection_callbacks; @@ -3618,7 +3654,7 @@ void SslSocketTest::testClientSessionResumption(const std::string& server_ctx_ya close_count = 0; client_connection = dispatcher->createClientConnection( - socket->addressProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), + socket->connectionInfoProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), client_ssl_socket_factory.createTransportSocket(nullptr), nullptr); client_connection->addConnectionCallbacks(client_connection_callbacks); client_connection->connect(); @@ -3799,7 +3835,7 @@ TEST_P(SslSocketTest, SslError) { Network::ListenerPtr listener = dispatcher_->createListener(socket, callbacks, true); Network::ClientConnectionPtr client_connection = dispatcher_->createClientConnection( - socket->addressProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), + socket->connectionInfoProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), Network::Test::createRawBufferSocket(), nullptr); client_connection->connect(); Buffer::OwnedImpl bad_data("bad_handshake_data"); @@ -4805,7 +4841,7 @@ class SslReadBufferLimitTest : public 
SslSocketTest { auto transport_socket = client_ssl_socket_factory_->createTransportSocket(nullptr); client_transport_socket_ = transport_socket.get(); client_connection_ = - dispatcher_->createClientConnection(socket_->addressProvider().localAddress(), + dispatcher_->createClientConnection(socket_->connectionInfoProvider().localAddress(), source_address_, std::move(transport_socket), nullptr); client_connection_->addConnectionCallbacks(client_callbacks_); client_connection_->connect(); @@ -5026,7 +5062,7 @@ TEST_P(SslReadBufferLimitTest, TestBind) { dispatcher_->run(Event::Dispatcher::RunType::Block); EXPECT_EQ(address_string, - server_connection_->addressProvider().remoteAddress()->ip()->addressAsString()); + server_connection_->connectionInfoProvider().remoteAddress()->ip()->addressAsString()); disconnect(); } @@ -5428,7 +5464,9 @@ TEST_P(SslSocketTest, RsaPrivateKeyProviderAsyncDecryptCompleteFailure) { TestUtilOptions failing_test_options(failing_client_ctx_yaml, server_ctx_yaml, false, GetParam()); testUtil(failing_test_options.setPrivateKeyMethodExpected(true) .setExpectedServerCloseEvent(Network::ConnectionEvent::LocalClose) - .setExpectedServerStats("ssl.connection_error")); + .setExpectedServerStats("ssl.connection_error") + .setExpectedTransportFailureReasonContains("system library") + .setNotExpectedClientStats("ssl.connection_error")); } // Test having one cert with private key method and another with just @@ -5811,69 +5849,6 @@ TEST_P(SslSocketTest, TestConnectionFailsWhenCertIsMustStapleAndResponseExpired) testUtil(test_options.setExpectedServerStats("ssl.ocsp_staple_failed").enableOcspStapling()); } -TEST_P(SslSocketTest, TestConnectionSucceedsForMustStapleCertExpirationValidationOff) { - const std::string server_ctx_yaml = R"EOF( - common_tls_context: - tls_certificates: - - certificate_chain: - filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/ocsp/test_data/revoked_cert.pem" - private_key: - filename: "{{ test_rundir 
}}/test/extensions/transport_sockets/tls/ocsp/test_data/revoked_key.pem" - ocsp_staple: - filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/ocsp/test_data/revoked_ocsp_resp.der" - ocsp_staple_policy: must_staple - )EOF"; - - const std::string client_ctx_yaml = R"EOF( - common_tls_context: - tls_params: - cipher_suites: - - TLS_RSA_WITH_AES_128_GCM_SHA256 -)EOF"; - - TestScopedRuntime scoped_runtime; - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.check_ocsp_policy", "false"}}); - - TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam()); - std::string ocsp_response_path = - "{{ test_rundir " - "}}/test/extensions/transport_sockets/tls/ocsp/test_data/revoked_ocsp_resp.der"; - std::string expected_response = - TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(ocsp_response_path)); - testUtil(test_options.enableOcspStapling() - .setExpectedServerStats("ssl.ocsp_staple_responses") - .setExpectedOcspResponse(expected_response)); -} - -TEST_P(SslSocketTest, TestConnectionSucceedsForMustStapleCertNoValidationNoResponse) { - const std::string server_ctx_yaml = R"EOF( - common_tls_context: - tls_certificates: - - certificate_chain: - filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/ocsp/test_data/revoked_cert.pem" - private_key: - filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/ocsp/test_data/revoked_key.pem" - ocsp_staple_policy: lenient_stapling - )EOF"; - - const std::string client_ctx_yaml = R"EOF( - common_tls_context: - tls_params: - cipher_suites: - - TLS_RSA_WITH_AES_128_GCM_SHA256 -)EOF"; - - TestScopedRuntime scoped_runtime; - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.require_ocsp_response_for_must_staple_certs", "false"}, - {"envoy.reloadable_features.check_ocsp_policy", "false"}}); - TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam()); - 
testUtil(test_options.setExpectedServerStats("ssl.ocsp_staple_omitted") - .enableOcspStapling() - .setExpectedOcspResponse("")); -} - TEST_P(SslSocketTest, TestFilterMultipleCertsFilterByOcspPolicyFallbackOnFirst) { const std::string server_ctx_yaml = R"EOF( common_tls_context: diff --git a/test/extensions/upstreams/http/tcp/upstream_request_test.cc b/test/extensions/upstreams/http/tcp/upstream_request_test.cc index 4ceaa9f3dddac..c5310d25fe1be 100644 --- a/test/extensions/upstreams/http/tcp/upstream_request_test.cc +++ b/test/extensions/upstreams/http/tcp/upstream_request_test.cc @@ -148,10 +148,10 @@ TEST_F(TcpUpstreamTest, V1Header) { envoy::config::core::v3::ProxyProtocolConfig* proxy_config = mock_router_filter_.route_entry_.connect_config_->mutable_proxy_protocol_config(); proxy_config->set_version(envoy::config::core::v3::ProxyProtocolConfig::V1); - mock_router_filter_.client_connection_.stream_info_.downstream_address_provider_ + mock_router_filter_.client_connection_.stream_info_.downstream_connection_info_provider_ ->setRemoteAddress(std::make_shared("1.2.3.4", 5)); - mock_router_filter_.client_connection_.stream_info_.downstream_address_provider_->setLocalAddress( - std::make_shared("4.5.6.7", 8)); + mock_router_filter_.client_connection_.stream_info_.downstream_connection_info_provider_ + ->setLocalAddress(std::make_shared("4.5.6.7", 8)); Buffer::OwnedImpl expected_data; Extensions::Common::ProxyProtocol::generateProxyProtoHeader( @@ -171,10 +171,10 @@ TEST_F(TcpUpstreamTest, V2Header) { envoy::config::core::v3::ProxyProtocolConfig* proxy_config = mock_router_filter_.route_entry_.connect_config_->mutable_proxy_protocol_config(); proxy_config->set_version(envoy::config::core::v3::ProxyProtocolConfig::V2); - mock_router_filter_.client_connection_.stream_info_.downstream_address_provider_ + mock_router_filter_.client_connection_.stream_info_.downstream_connection_info_provider_ ->setRemoteAddress(std::make_shared("1.2.3.4", 5)); - 
mock_router_filter_.client_connection_.stream_info_.downstream_address_provider_->setLocalAddress( - std::make_shared("4.5.6.7", 8)); + mock_router_filter_.client_connection_.stream_info_.downstream_connection_info_provider_ + ->setLocalAddress(std::make_shared("4.5.6.7", 8)); Buffer::OwnedImpl expected_data; Extensions::Common::ProxyProtocol::generateProxyProtoHeader( diff --git a/test/fuzz/utility.h b/test/fuzz/utility.h index b478b9eab1faf..1c13400450046 100644 --- a/test/fuzz/utility.h +++ b/test/fuzz/utility.h @@ -165,14 +165,14 @@ inline std::unique_ptr fromStreamInfo(const test::fuzz::StreamIn ? Envoy::Network::Address::resolveProtoAddress(stream_info.upstream_local_address()) : Network::Utility::resolveUrl("tcp://10.0.0.1:10000"); test_stream_info->upstream_local_address_ = upstream_local_address; - test_stream_info->downstream_address_provider_ = - std::make_shared(address, address); - test_stream_info->downstream_address_provider_->setRequestedServerName( + test_stream_info->downstream_connection_info_provider_ = + std::make_shared(address, address); + test_stream_info->downstream_connection_info_provider_->setRequestedServerName( stream_info.requested_server_name()); auto connection_info = std::make_shared>(); ON_CALL(*connection_info, subjectPeerCertificate()) .WillByDefault(testing::ReturnRef(TestSubjectPeer)); - test_stream_info->setDownstreamSslConnection(connection_info); + test_stream_info->downstream_connection_info_provider_->setSslConnection(connection_info); return test_stream_info; } diff --git a/test/integration/BUILD b/test/integration/BUILD index f1fdda3435d77..247216e7340d5 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -32,11 +32,11 @@ envoy_cc_test_library( ":http_integration_lib", "//source/common/common:matchers_lib", "//source/common/config:protobuf_link_hacks", - "//source/common/config:version_converter_lib", "//source/common/protobuf:utility_lib", "//source/common/version:version_lib", 
"//source/extensions/filters/network/redis_proxy:config", "//test/common/grpc:grpc_client_integration_lib", + "//test/config:v2_link_hacks", "//test/test_common:network_utility_lib", "//test/test_common:resources_lib", "//test/test_common:utility_lib", @@ -120,6 +120,7 @@ envoy_cc_test( "//source/common/config:protobuf_link_hacks", "//source/common/protobuf:utility_lib", "//test/common/grpc:grpc_client_integration_lib", + "//test/config:v2_link_hacks", "//test/mocks/runtime:runtime_mocks", "//test/test_common:network_utility_lib", "//test/test_common:resources_lib", @@ -213,6 +214,7 @@ envoy_cc_test( "//source/common/config:protobuf_link_hacks", "//source/common/protobuf:utility_lib", "//test/common/grpc:grpc_client_integration_lib", + "//test/config:v2_link_hacks", "//test/mocks/runtime:runtime_mocks", "//test/test_common:network_utility_lib", "//test/test_common:resources_lib", @@ -301,6 +303,7 @@ envoy_cc_test( "//source/common/config:api_version_lib", "//source/common/protobuf", "//source/extensions/http/original_ip_detection/xff:config", + "//test/config:v2_link_hacks", "//test/test_common:utility_lib", "@envoy_api//envoy/api/v2:pkg_cc_proto", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", @@ -383,7 +386,9 @@ envoy_cc_test( "buffer_accounting_integration_test.cc", ], deps = [ + ":base_overload_integration_test_lib", ":http_integration_lib", + ":http_protocol_integration_lib", ":socket_interface_swap_lib", ":tracked_watermark_buffer_lib", "//test/mocks/http:http_mocks", @@ -599,6 +604,7 @@ envoy_cc_test_library( "//test/integration/filters:add_body_filter_config_lib", "//test/integration/filters:add_trailers_filter_config_lib", "//test/integration/filters:call_decodedata_once_filter_config_lib", + "//test/integration/filters:crash_filter_config_lib", "//test/integration/filters:decode_headers_return_stop_all_filter_config_lib", "//test/integration/filters:encode_headers_return_stop_all_filter_config_lib", 
"//test/integration/filters:modify_buffer_filter_config_lib", @@ -745,7 +751,6 @@ envoy_cc_test_library( ":integration_tcp_client_lib", ":utility_lib", "//source/common/config:api_version_lib", - "//source/common/config:version_converter_lib", "//source/extensions/transport_sockets/tls:context_config_lib", "//source/extensions/transport_sockets/tls:context_lib", "//source/extensions/transport_sockets/tls:ssl_socket_lib", @@ -842,6 +847,7 @@ envoy_cc_test_library( "//test/test_common:test_time_system_interface", "//test/test_common:utility_lib", "@com_google_absl//absl/synchronization", + "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/transport_sockets/quic/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto", @@ -880,7 +886,6 @@ envoy_cc_test_library( "//source/common/common:basic_resource_lib", "//source/common/common:minimal_logger_lib", "//source/common/config:api_version_lib", - "//source/common/config:version_converter_lib", "//source/common/event:dispatcher_lib", "//source/common/grpc:codec_lib", "//source/common/grpc:common_lib", @@ -950,8 +955,8 @@ envoy_cc_test( "//test/mocks/http:http_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", - "@envoy_api//envoy/config/filter/http/grpc_http1_bridge/v2:pkg_cc_proto", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/grpc_http1_bridge/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", ], ) @@ -1094,11 +1099,43 @@ envoy_cc_test( ], ) +envoy_cc_test_library( + name = "fake_resource_monitor_lib", + srcs = [ + "fake_resource_monitor.cc", + ], + hdrs = [ + "fake_resource_monitor.h", + ], + deps = [ + "//envoy/server:resource_monitor_config_interface", + "//test/common/config:dummy_config_proto_cc_proto", + ], +) + +envoy_cc_test_library( + name = 
"base_overload_integration_test_lib", + srcs = [ + "base_overload_integration_test.cc", + ], + hdrs = [ + "base_overload_integration_test.h", + ], + deps = [ + ":fake_resource_monitor_lib", + "//test/test_common:registry_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", + "@envoy_api//envoy/config/overload/v3:pkg_cc_proto", + ], +) + envoy_cc_test( name = "overload_integration_test", srcs = ["overload_integration_test.cc"], shard_count = 2, deps = [ + ":base_overload_integration_test_lib", ":http_protocol_integration_lib", "//test/common/config:dummy_config_proto_cc_proto", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", @@ -1133,6 +1170,7 @@ envoy_cc_test( deps = [ ":http_integration_lib", "//test/common/grpc:grpc_client_integration_lib", + "//test/config:v2_link_hacks", "@envoy_api//envoy/service/runtime/v3:pkg_cc_proto", ], ) @@ -1143,6 +1181,7 @@ envoy_cc_test( deps = [ ":http_integration_lib", "//test/common/grpc:grpc_client_integration_lib", + "//test/config:v2_link_hacks", "//test/integration/filters:set_is_terminal_filter_config_proto_cc_proto", "//test/integration/filters:set_is_terminal_filter_lib", "//test/integration/filters:set_response_code_filter_config_proto_cc_proto", @@ -1216,6 +1255,7 @@ envoy_cc_test( "//source/extensions/transport_sockets/tls:context_config_lib", "//source/extensions/transport_sockets/tls:context_lib", "//test/common/grpc:grpc_client_integration_lib", + "//test/extensions/transport_sockets/tls:test_private_key_method_provider_test_lib", "//test/mocks/runtime:runtime_mocks", "//test/mocks/secret:secret_mocks", "//test/test_common:resources_lib", @@ -1238,6 +1278,7 @@ envoy_cc_test( ":http_integration_lib", "//envoy/registry", "//source/common/grpc:common_lib", + "//test/config:v2_link_hacks", "//test/test_common:registry_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", @@ -1340,6 +1381,7 @@ envoy_cc_test_library( deps = [ 
"//source/common/buffer:watermark_buffer_lib", "//test/test_common:utility_lib", + "@envoy_api//envoy/config/overload/v3:pkg_cc_proto", ], ) @@ -1348,6 +1390,7 @@ envoy_cc_test( srcs = ["tracked_watermark_buffer_test.cc"], deps = [ ":tracked_watermark_buffer_lib", + "//test/mocks/http:stream_reset_handler_mock", "//test/test_common:test_runtime_lib", ], ) @@ -1571,6 +1614,7 @@ envoy_cc_test( "//source/common/network:connection_lib", "//source/common/network:utility_lib", "//test/common/grpc:grpc_client_integration_lib", + "//test/config:v2_link_hacks", "//test/test_common:resources_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", @@ -1596,6 +1640,7 @@ envoy_cc_test( "//source/extensions/filters/http/health_check:config", "//source/extensions/filters/network/tcp_proxy:config", "//test/common/grpc:grpc_client_integration_lib", + "//test/config:v2_link_hacks", "//test/integration/filters:address_restore_listener_filter_lib", "//test/test_common:network_utility_lib", "//test/test_common:resources_lib", @@ -1693,6 +1738,7 @@ envoy_cc_test( ":http_integration_lib", ":integration_lib", "//test/common/http/http2:http2_frame", + "//test/config:v2_link_hacks", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) @@ -1772,7 +1818,7 @@ envoy_cc_test( "@envoy_api//envoy/config/overload/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/transport_sockets/quic/v3:pkg_cc_proto", - "@com_googlesource_quiche//:quic_test_tools_session_peer_lib", + "@com_github_google_quiche//:quic_test_tools_session_peer_lib", ]), ) diff --git a/test/integration/README.md b/test/integration/README.md index 6e13c958aa2c1..e36b9d4d031eb 100644 --- a/test/integration/README.md +++ b/test/integration/README.md @@ -78,7 +78,7 @@ An example of modifying `HttpConnectionManager` to change Envoy’s HTTP/1.1 pro ```c++ 
config_helper_.addConfigModifier([&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm) -> void { - envoy::api::v2::core::Http1ProtocolOptions options; + envoy::config::core::v3::Http1ProtocolOptions options; options.mutable_allow_absolute_url()->set_value(true); hcm.mutable_http_protocol_options()->CopyFrom(options); };); @@ -88,7 +88,7 @@ An example of modifying `HttpConnectionManager` to add an additional upstream cluster: ```c++ - config_helper_.addConfigModifier([](envoy::config::bootstrap::v2::Bootstrap& bootstrap) { + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { bootstrap.mutable_rate_limit_service()->set_cluster_name("ratelimit"); auto* ratelimit_cluster = bootstrap.mutable_static_resources()->add_clusters(); ratelimit_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]); diff --git a/test/integration/ads_integration.cc b/test/integration/ads_integration.cc index f356c7cd6cc25..53716bfa5e7df 100644 --- a/test/integration/ads_integration.cc +++ b/test/integration/ads_integration.cc @@ -21,17 +21,14 @@ using testing::AssertionResult; namespace Envoy { -AdsIntegrationTest::AdsIntegrationTest(envoy::config::core::v3::ApiVersion resource_api_version, - envoy::config::core::v3::ApiVersion transport_api_version) +AdsIntegrationTest::AdsIntegrationTest() : HttpIntegrationTest(Http::CodecType::HTTP2, ipVersion(), ConfigHelper::adsBootstrap( - sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? "GRPC" : "DELTA_GRPC", - resource_api_version, transport_api_version)) { + sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? 
"GRPC" : "DELTA_GRPC")) { use_lds_ = false; create_xds_upstream_ = true; tls_xds_upstream_ = true; sotw_or_delta_ = sotwOrDelta(); - api_version_ = resource_api_version; setUpstreamProtocol(Http::CodecType::HTTP2); } @@ -39,36 +36,35 @@ void AdsIntegrationTest::TearDown() { cleanUpXdsConnection(); } envoy::config::cluster::v3::Cluster AdsIntegrationTest::buildCluster(const std::string& name, const std::string& lb_policy) { - return ConfigHelper::buildCluster(name, lb_policy, api_version_); + return ConfigHelper::buildCluster(name, lb_policy); } envoy::config::cluster::v3::Cluster AdsIntegrationTest::buildTlsCluster(const std::string& name) { - return ConfigHelper::buildTlsCluster(name, "ROUND_ROBIN", api_version_); + return ConfigHelper::buildTlsCluster(name, "ROUND_ROBIN"); } envoy::config::cluster::v3::Cluster AdsIntegrationTest::buildRedisCluster(const std::string& name) { - return ConfigHelper::buildCluster(name, "MAGLEV", api_version_); + return ConfigHelper::buildCluster(name, "MAGLEV"); } envoy::config::endpoint::v3::ClusterLoadAssignment AdsIntegrationTest::buildClusterLoadAssignment(const std::string& name) { return ConfigHelper::buildClusterLoadAssignment( name, Network::Test::getLoopbackAddressString(ipVersion()), - fake_upstreams_[0]->localAddress()->ip()->port(), api_version_); + fake_upstreams_[0]->localAddress()->ip()->port()); } envoy::config::endpoint::v3::ClusterLoadAssignment AdsIntegrationTest::buildTlsClusterLoadAssignment(const std::string& name) { return ConfigHelper::buildClusterLoadAssignment( - name, Network::Test::getLoopbackAddressString(ipVersion()), 8443, api_version_); + name, Network::Test::getLoopbackAddressString(ipVersion()), 8443); } envoy::config::listener::v3::Listener AdsIntegrationTest::buildListener(const std::string& name, const std::string& route_config, const std::string& stat_prefix) { - return ConfigHelper::buildListener(name, route_config, - Network::Test::getLoopbackAddressString(ipVersion()), - stat_prefix, 
api_version_); + return ConfigHelper::buildListener( + name, route_config, Network::Test::getLoopbackAddressString(ipVersion()), stat_prefix); } envoy::config::listener::v3::Listener @@ -88,12 +84,12 @@ AdsIntegrationTest::buildRedisListener(const std::string& name, const std::strin )EOF", name, cluster); return ConfigHelper::buildBaseListener(name, Network::Test::getLoopbackAddressString(ipVersion()), - redis, api_version_); + redis); } envoy::config::route::v3::RouteConfiguration AdsIntegrationTest::buildRouteConfig(const std::string& name, const std::string& cluster) { - return ConfigHelper::buildRouteConfig(name, cluster, api_version_); + return ConfigHelper::buildRouteConfig(name, cluster); } void AdsIntegrationTest::makeSingleRequest() { @@ -130,9 +126,6 @@ void AdsIntegrationTest::initializeAds(const bool rate_limiting) { ads_cluster->mutable_transport_socket()->set_name("envoy.transport_sockets.tls"); ads_cluster->mutable_transport_socket()->mutable_typed_config()->PackFrom(context); }); - if (api_version_ == envoy::config::core::v3::ApiVersion::V2 && !fatal_by_default_v2_override_) { - config_helper_.enableDeprecatedV2Api(); - } HttpIntegrationTest::initialize(); if (xds_stream_ == nullptr) { createXdsConnection(); diff --git a/test/integration/ads_integration.h b/test/integration/ads_integration.h index 18468f975d795..adc0a66ffe023 100644 --- a/test/integration/ads_integration.h +++ b/test/integration/ads_integration.h @@ -17,10 +17,7 @@ namespace Envoy { class AdsIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, public HttpIntegrationTest { public: - AdsIntegrationTest(envoy::config::core::v3::ApiVersion resource_api_version, - envoy::config::core::v3::ApiVersion transport_api_version = - envoy::config::core::v3::ApiVersion::AUTO); - AdsIntegrationTest() : AdsIntegrationTest(envoy::config::core::v3::ApiVersion::V3) {} + AdsIntegrationTest(); void TearDown() override; @@ -57,11 +54,6 @@ class AdsIntegrationTest : public 
Grpc::DeltaSotwIntegrationParamTest, public Ht envoy::admin::v3::ClustersConfigDump getClustersConfigDump(); envoy::admin::v3::ListenersConfigDump getListenersConfigDump(); envoy::admin::v3::RoutesConfigDump getRoutesConfigDump(); - - // If API version is v2, fatal-by-default is disabled unless fatal_by_default_v2_override_ is set. - envoy::config::core::v3::ApiVersion api_version_; - // Set to force fatal-by-default v2 even if API version is v2. - bool fatal_by_default_v2_override_{false}; }; } // namespace Envoy diff --git a/test/integration/ads_integration_test.cc b/test/integration/ads_integration_test.cc index f8045c2ad55a7..f2d0edad3ca84 100644 --- a/test/integration/ads_integration_test.cc +++ b/test/integration/ads_integration_test.cc @@ -7,12 +7,12 @@ #include "envoy/grpc/status.h" #include "source/common/config/protobuf_link_hacks.h" -#include "source/common/config/version_converter.h" #include "source/common/protobuf/protobuf.h" #include "source/common/protobuf/utility.h" #include "source/common/version/version.h" #include "test/common/grpc/grpc_client_integration.h" +#include "test/config/v2_link_hacks.h" #include "test/integration/ads_integration.h" #include "test/integration/http_integration.h" #include "test/integration/utility.h" @@ -38,19 +38,18 @@ TEST_P(AdsIntegrationTest, Basic) { // Basic CDS/EDS update that warms and makes active a single cluster. 
TEST_P(AdsIntegrationTest, BasicClusterInitialWarming) { initialize(); - const auto cds_type_url = Config::getTypeUrl( - envoy::config::core::v3::ApiVersion::V3); - const auto eds_type_url = Config::getTypeUrl( - envoy::config::core::v3::ApiVersion::V3); + const auto cds_type_url = Config::getTypeUrl(); + const auto eds_type_url = + Config::getTypeUrl(); EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, "", {}, {}, {}, true)); sendDiscoveryResponse( - cds_type_url, {buildCluster("cluster_0")}, {buildCluster("cluster_0")}, {}, "1", false); + cds_type_url, {buildCluster("cluster_0")}, {buildCluster("cluster_0")}, {}, "1"); test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 1); EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "", {"cluster_0"}, {"cluster_0"}, {})); sendDiscoveryResponse( eds_type_url, {buildClusterLoadAssignment("cluster_0")}, - {buildClusterLoadAssignment("cluster_0")}, {}, "1", false); + {buildClusterLoadAssignment("cluster_0")}, {}, "1"); test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 0); test_server_->waitForGaugeGe("cluster_manager.active_clusters", 2); @@ -60,23 +59,22 @@ TEST_P(AdsIntegrationTest, BasicClusterInitialWarming) { // manager as a whole is not initialized. 
TEST_P(AdsIntegrationTest, ClusterInitializationUpdateTheOnlyWarmingCluster) { initialize(); - const auto cds_type_url = Config::getTypeUrl( - envoy::config::core::v3::ApiVersion::V3); - const auto eds_type_url = Config::getTypeUrl( - envoy::config::core::v3::ApiVersion::V3); + const auto cds_type_url = Config::getTypeUrl(); + const auto eds_type_url = + Config::getTypeUrl(); EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, "", {}, {}, {}, true)); sendDiscoveryResponse( - cds_type_url, {buildCluster("cluster_0")}, {buildCluster("cluster_0")}, {}, "1", false); + cds_type_url, {buildCluster("cluster_0")}, {buildCluster("cluster_0")}, {}, "1"); test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 1); // Update lb policy to MAGLEV so that cluster update is not skipped due to the same hash. sendDiscoveryResponse( cds_type_url, {buildCluster("cluster_0", "MAGLEV")}, {buildCluster("cluster_0", "MAGLEV")}, - {}, "2", false); + {}, "2"); EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "", {"cluster_0"}, {"cluster_0"}, {})); sendDiscoveryResponse( eds_type_url, {buildClusterLoadAssignment("cluster_0")}, - {buildClusterLoadAssignment("cluster_0")}, {}, "1", false); + {buildClusterLoadAssignment("cluster_0")}, {}, "1"); test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 0); test_server_->waitForGaugeGe("cluster_manager.active_clusters", 2); @@ -86,8 +84,7 @@ TEST_P(AdsIntegrationTest, ClusterInitializationUpdateTheOnlyWarmingCluster) { // config and verify that all the clusters are initialized. 
TEST_P(AdsIntegrationTest, TestPrimaryClusterWarmClusterInitialization) { initialize(); - const auto cds_type_url = Config::getTypeUrl( - envoy::config::core::v3::ApiVersion::V3); + const auto cds_type_url = Config::getTypeUrl(); auto loopback = Network::Test::getLoopbackAddressString(ipVersion()); addFakeUpstream(Http::CodecType::HTTP2); auto port = fake_upstreams_.back()->localAddress()->ip()->port(); @@ -114,7 +111,7 @@ TEST_P(AdsIntegrationTest, TestPrimaryClusterWarmClusterInitialization) { EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, "", {}, {}, {}, true)); sendDiscoveryResponse(cds_type_url, {warming_cluster}, - {warming_cluster}, {}, "1", false); + {warming_cluster}, {}, "1"); FakeRawConnectionPtr fake_upstream_connection; ASSERT_TRUE(fake_upstreams_.back()->waitForRawConnection(fake_upstream_connection)); @@ -124,7 +121,7 @@ TEST_P(AdsIntegrationTest, TestPrimaryClusterWarmClusterInitialization) { // Now replace the warming cluster by the config which will turn ready immediately. sendDiscoveryResponse(cds_type_url, {active_cluster}, - {active_cluster}, {}, "2", false); + {active_cluster}, {}, "2"); // All clusters are ready. test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 0); @@ -134,10 +131,9 @@ TEST_P(AdsIntegrationTest, TestPrimaryClusterWarmClusterInitialization) { // Two cluster warming, update one of them. Verify that the clusters are eventually initialized. 
TEST_P(AdsIntegrationTest, ClusterInitializationUpdateOneOfThe2Warming) { initialize(); - const auto cds_type_url = Config::getTypeUrl( - envoy::config::core::v3::ApiVersion::V3); - const auto eds_type_url = Config::getTypeUrl( - envoy::config::core::v3::ApiVersion::V3); + const auto cds_type_url = Config::getTypeUrl(); + const auto eds_type_url = + Config::getTypeUrl(); EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, "", {}, {}, {}, true)); sendDiscoveryResponse( @@ -146,7 +142,7 @@ TEST_P(AdsIntegrationTest, ClusterInitializationUpdateOneOfThe2Warming) { buildCluster("cluster_0"), buildCluster("cluster_1")}, {ConfigHelper::buildStaticCluster("primary_cluster", 8000, "127.0.0.1"), buildCluster("cluster_0"), buildCluster("cluster_1")}, - {}, "1", false); + {}, "1"); test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 2); @@ -157,14 +153,13 @@ TEST_P(AdsIntegrationTest, ClusterInitializationUpdateOneOfThe2Warming) { buildCluster("cluster_0", "MAGLEV"), buildCluster("cluster_1")}, {ConfigHelper::buildStaticCluster("primary_cluster", 8000, "127.0.0.1"), buildCluster("cluster_0", "MAGLEV"), buildCluster("cluster_1")}, - {}, "2", false); + {}, "2"); EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "", {"cluster_0", "cluster_1"}, {"cluster_0", "cluster_1"}, {})); sendDiscoveryResponse( eds_type_url, {buildClusterLoadAssignment("cluster_0"), buildClusterLoadAssignment("cluster_1")}, - {buildClusterLoadAssignment("cluster_0"), buildClusterLoadAssignment("cluster_1")}, {}, "1", - false); + {buildClusterLoadAssignment("cluster_0"), buildClusterLoadAssignment("cluster_1")}, {}, "1"); test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 0); test_server_->waitForGaugeGe("cluster_manager.active_clusters", 4); @@ -175,11 +170,9 @@ TEST_P(AdsIntegrationTest, ClusterInitializationUpdateOneOfThe2Warming) { // This is a regression test of #11120. 
TEST_P(AdsIntegrationTest, ClusterSharingSecretWarming) { initialize(); - const auto cds_type_url = Config::getTypeUrl( - envoy::config::core::v3::ApiVersion::V3); + const auto cds_type_url = Config::getTypeUrl(); const auto sds_type_url = - Config::getTypeUrl( - envoy::config::core::v3::ApiVersion::V3); + Config::getTypeUrl(); envoy::config::core::v3::TransportSocket sds_transport_socket; TestUtility::loadFromYaml(R"EOF( @@ -203,8 +196,8 @@ TEST_P(AdsIntegrationTest, ClusterSharingSecretWarming) { cluster_1.set_name("cluster_1"); EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, "", {}, {}, {}, true)); - sendDiscoveryResponse( - cds_type_url, {cluster_0, cluster_1}, {cluster_0, cluster_1}, {}, "1", false); + sendDiscoveryResponse(cds_type_url, {cluster_0, cluster_1}, + {cluster_0, cluster_1}, {}, "1"); EXPECT_TRUE(compareDiscoveryRequest(sds_type_url, "", {"validation_context"}, {"validation_context"}, {})); @@ -1057,8 +1050,7 @@ class AdsFailIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, AdsFailIntegrationTest() : HttpIntegrationTest(Http::CodecType::HTTP2, ipVersion(), ConfigHelper::adsBootstrap( - sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? "GRPC" : "DELTA_GRPC", - envoy::config::core::v3::ApiVersion::V3)) { + sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? "GRPC" : "DELTA_GRPC")) { create_xds_upstream_ = true; use_lds_ = false; sotw_or_delta_ = sotwOrDelta(); @@ -1098,8 +1090,7 @@ class AdsConfigIntegrationTest : public Grpc::DeltaSotwIntegrationParamTest, AdsConfigIntegrationTest() : HttpIntegrationTest(Http::CodecType::HTTP2, ipVersion(), ConfigHelper::adsBootstrap( - sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? "GRPC" : "DELTA_GRPC", - envoy::config::core::v3::ApiVersion::V3)) { + sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? 
"GRPC" : "DELTA_GRPC")) { create_xds_upstream_ = true; use_lds_ = false; sotw_or_delta_ = sotwOrDelta(); @@ -1247,7 +1238,7 @@ TEST_P(AdsIntegrationTest, NodeMessage) { node = &delta_request.node(); } envoy::config::core::v3::BuildVersion build_version_msg; - Config::VersionConverter::upgrade(node->user_agent_build_version(), build_version_msg); + build_version_msg.MergeFrom(node->user_agent_build_version()); EXPECT_THAT(build_version_msg, ProtoEq(VersionInfo::buildVersion())); EXPECT_GE(node->extensions().size(), 0); EXPECT_EQ(0, node->client_features().size()); @@ -1284,8 +1275,7 @@ class AdsClusterFromFileIntegrationTest : public Grpc::DeltaSotwIntegrationParam AdsClusterFromFileIntegrationTest() : HttpIntegrationTest(Http::CodecType::HTTP2, ipVersion(), ConfigHelper::adsBootstrap( - sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? "GRPC" : "DELTA_GRPC", - envoy::config::core::v3::ApiVersion::V3)) { + sotwOrDelta() == Grpc::SotwOrDelta::Sotw ? "GRPC" : "DELTA_GRPC")) { create_xds_upstream_ = true; use_lds_ = false; sotw_or_delta_ = sotwOrDelta(); @@ -1666,219 +1656,4 @@ TEST_P(XdsTpAdsIntegrationTest, Basic) { makeSingleRequest(); } -// Some v2 ADS integration tests, these validate basic v2 support but are not complete, they reflect -// tests that have historically been worth validating on both v2 and v3. They will be removed in Q1. 
-class AdsClusterV2Test : public AdsIntegrationTest { -public: - AdsClusterV2Test() : AdsIntegrationTest(envoy::config::core::v3::ApiVersion::V2) {} - void initialize() override { - config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { - auto* cluster0 = bootstrap.mutable_static_resources()->mutable_clusters(0); - cluster0->mutable_typed_extension_protocol_options()->clear(); - cluster0->mutable_http2_protocol_options(); - }); - AdsIntegrationTest::initialize(); - } -}; - -INSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDelta, AdsClusterV2Test, - DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS); - -// Basic CDS/EDS update that warms and makes active a single cluster (v2 API). -TEST_P(AdsClusterV2Test, DEPRECATED_FEATURE_TEST(BasicClusterInitialWarming)) { - initialize(); - const auto cds_type_url = Config::getTypeUrl( - envoy::config::core::v3::ApiVersion::V2); - const auto eds_type_url = Config::getTypeUrl( - envoy::config::core::v3::ApiVersion::V2); - - EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, "", {}, {}, {}, true)); - sendDiscoveryResponse( - cds_type_url, {buildCluster("cluster_0")}, {buildCluster("cluster_0")}, {}, "1", true); - test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 1); - EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "", {"cluster_0"}, {"cluster_0"}, {})); - sendDiscoveryResponse( - eds_type_url, {buildClusterLoadAssignment("cluster_0")}, - {buildClusterLoadAssignment("cluster_0")}, {}, "1", true); - - test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 0); - test_server_->waitForGaugeGe("cluster_manager.active_clusters", 2); -} - -// Verify CDS is paused during cluster warming. 
-TEST_P(AdsClusterV2Test, DEPRECATED_FEATURE_TEST(CdsPausedDuringWarming)) { - initialize(); - - const auto cds_type_url = Config::getTypeUrl( - envoy::config::core::v3::ApiVersion::V2); - const auto eds_type_url = Config::getTypeUrl( - envoy::config::core::v3::ApiVersion::V2); - const auto lds_type_url = Config::getTypeUrl( - envoy::config::core::v3::ApiVersion::V2); - const auto rds_type_url = Config::getTypeUrl( - envoy::config::core::v3::ApiVersion::V2); - - // Send initial configuration, validate we can process a request. - EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, "", {}, {}, {}, true)); - sendDiscoveryResponse( - cds_type_url, {buildCluster("cluster_0")}, {buildCluster("cluster_0")}, {}, "1", true); - EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "", {"cluster_0"}, {"cluster_0"}, {})); - - sendDiscoveryResponse( - eds_type_url, {buildClusterLoadAssignment("cluster_0")}, - {buildClusterLoadAssignment("cluster_0")}, {}, "1", true); - - EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, "1", {}, {}, {})); - EXPECT_TRUE(compareDiscoveryRequest(lds_type_url, "", {}, {}, {})); - sendDiscoveryResponse( - lds_type_url, {buildListener("listener_0", "route_config_0")}, - {buildListener("listener_0", "route_config_0")}, {}, "1", true); - - EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "1", {"cluster_0"}, {}, {})); - EXPECT_TRUE( - compareDiscoveryRequest(rds_type_url, "", {"route_config_0"}, {"route_config_0"}, {})); - sendDiscoveryResponse( - rds_type_url, {buildRouteConfig("route_config_0", "cluster_0")}, - {buildRouteConfig("route_config_0", "cluster_0")}, {}, "1", true); - - EXPECT_TRUE(compareDiscoveryRequest(lds_type_url, "1", {}, {}, {})); - EXPECT_TRUE(compareDiscoveryRequest(rds_type_url, "1", {"route_config_0"}, {}, {})); - - test_server_->waitForCounterGe("listener_manager.listener_create_success", 1); - makeSingleRequest(); - - // Send the first warming cluster. 
- sendDiscoveryResponse( - cds_type_url, {buildCluster("warming_cluster_1")}, {buildCluster("warming_cluster_1")}, - {"cluster_0"}, "2", true); - - test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 1); - - EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "1", {"warming_cluster_1"}, - {"warming_cluster_1"}, {"cluster_0"})); - - // Send the second warming cluster. - sendDiscoveryResponse( - cds_type_url, {buildCluster("warming_cluster_1"), buildCluster("warming_cluster_2")}, - {buildCluster("warming_cluster_1"), buildCluster("warming_cluster_2")}, {}, "3", true); - test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 2); - // We would've got a Cluster discovery request with version 2 here, had the CDS not been paused. - - EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "1", {"warming_cluster_2", "warming_cluster_1"}, - {"warming_cluster_2"}, {})); - - // Finish warming the clusters. - sendDiscoveryResponse( - eds_type_url, - {buildClusterLoadAssignment("warming_cluster_1"), - buildClusterLoadAssignment("warming_cluster_2")}, - {buildClusterLoadAssignment("warming_cluster_1"), - buildClusterLoadAssignment("warming_cluster_2")}, - {"cluster_0"}, "2", true); - - // Validate that clusters are warmed. - test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 0); - - // CDS is resumed and EDS response was acknowledged. - if (sotw_or_delta_ == Grpc::SotwOrDelta::Delta) { - // Envoy will ACK both Cluster messages. Since they arrived while CDS was paused, they aren't - // sent until CDS is unpaused. Since version 3 has already arrived by the time the version 2 - // ACK goes out, they're both acknowledging version 3. 
- EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, "3", {}, {}, {})); - } - EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, "3", {}, {}, {})); - EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "2", {"warming_cluster_2", "warming_cluster_1"}, - {}, {})); -} - -// Validates that the initial xDS request batches all resources referred to in static config -TEST_P(AdsClusterV2Test, DEPRECATED_FEATURE_TEST(XdsBatching)) { - config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { - bootstrap.mutable_dynamic_resources()->clear_cds_config(); - bootstrap.mutable_dynamic_resources()->clear_lds_config(); - - auto static_resources = bootstrap.mutable_static_resources(); - static_resources->add_clusters()->MergeFrom(buildCluster("eds_cluster")); - static_resources->add_clusters()->MergeFrom(buildCluster("eds_cluster2")); - - static_resources->add_listeners()->MergeFrom(buildListener("rds_listener", "route_config")); - static_resources->add_listeners()->MergeFrom(buildListener("rds_listener2", "route_config2")); - }); - - on_server_init_function_ = [this]() { - createXdsConnection(); - ASSERT_TRUE(xds_connection_->waitForNewStream(*dispatcher_, xds_stream_)); - xds_stream_->startGrpcStream(); - - const auto eds_type_url = - Config::getTypeUrl( - envoy::config::core::v3::ApiVersion::V2); - const auto rds_type_url = Config::getTypeUrl( - envoy::config::core::v3::ApiVersion::V2); - - EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "", {"eds_cluster2", "eds_cluster"}, - {"eds_cluster2", "eds_cluster"}, {}, true)); - sendDiscoveryResponse( - eds_type_url, - {buildClusterLoadAssignment("eds_cluster"), buildClusterLoadAssignment("eds_cluster2")}, - {buildClusterLoadAssignment("eds_cluster"), buildClusterLoadAssignment("eds_cluster2")}, {}, - "1", true); - - EXPECT_TRUE(compareDiscoveryRequest(rds_type_url, "", {"route_config2", "route_config"}, - {"route_config2", "route_config"}, {})); - sendDiscoveryResponse( - rds_type_url, - 
{buildRouteConfig("route_config2", "eds_cluster2"), - buildRouteConfig("route_config", "dummy_cluster")}, - {buildRouteConfig("route_config2", "eds_cluster2"), - buildRouteConfig("route_config", "dummy_cluster")}, - {}, "1", true); - }; - - initialize(); -} - -// Regression test for https://github.com/envoyproxy/envoy/issues/13681. -TEST_P(AdsClusterV2Test, DEPRECATED_FEATURE_TEST(TypeUrlAnnotationRegression)) { - initialize(); - const auto cds_type_url = Config::getTypeUrl( - envoy::config::core::v3::ApiVersion::V2); - - EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, "", {}, {}, {}, true)); - auto cluster = buildCluster("cluster_0"); - auto* bias = cluster.mutable_least_request_lb_config()->mutable_active_request_bias(); - bias->set_default_value(1.1); - bias->set_runtime_key("foo"); - sendDiscoveryResponse(cds_type_url, {cluster}, {cluster}, {}, - "1", true); - - test_server_->waitForCounterGe("cluster_manager.cds.update_rejected", 1); -} - -// Validate v2 resource are rejected by default. -class AdsV2ResourceRejectTest : public AdsIntegrationTest { -public: - // We need to use a v3 transport as we're not going to set the v2 allow overrides. - AdsV2ResourceRejectTest() - : AdsIntegrationTest(envoy::config::core::v3::ApiVersion::V2, - envoy::config::core::v3::ApiVersion::V3) {} -}; - -INSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDelta, AdsV2ResourceRejectTest, - DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS); - -// If we attempt to use v2 APIs by default, the configuration should be rejected. 
-TEST_P(AdsV2ResourceRejectTest, DEPRECATED_FEATURE_TEST(RejectV2ConfigByDefault)) { - fatal_by_default_v2_override_ = true; - initialize(); - const auto cds_type_url = Config::getTypeUrl( - envoy::config::core::v3::ApiVersion::V2); - - EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, "", {}, {}, {}, true)); - sendDiscoveryResponse( - cds_type_url, {buildCluster("cluster_0")}, {buildCluster("cluster_0")}, {}, "1", true); - test_server_->waitForCounterGe("cluster_manager.cds.update_rejected", 1); - EXPECT_EQ(1, test_server_->gauge("runtime.deprecated_feature_seen_since_process_start")->value()); -} - } // namespace Envoy diff --git a/test/integration/base_integration_test.cc b/test/integration/base_integration_test.cc index 26c12716cfcb2..5b870e5e5cf47 100644 --- a/test/integration/base_integration_test.cc +++ b/test/integration/base_integration_test.cc @@ -315,7 +315,8 @@ void BaseIntegrationTest::registerTestServerPorts(const std::vector registerPort(*port_it, listen_addr->ip()->port()); } } - const auto admin_addr = test_server_->server().admin().socket().addressProvider().localAddress(); + const auto admin_addr = + test_server_->server().admin().socket().connectionInfoProvider().localAddress(); if (admin_addr->type() == Network::Address::Type::Ip) { registerPort("admin", admin_addr->ip()->port()); } @@ -334,8 +335,7 @@ void BaseIntegrationTest::createGeneratedApiTestServer( test_server_ = IntegrationTestServer::create( bootstrap_path, version_, on_server_ready_function_, on_server_init_function_, deterministic_, timeSystem(), *api_, defer_listener_finalization_, process_object_, validator_config, - concurrency_, drain_time_, drain_strategy_, proxy_buffer_factory_, use_real_stats_, - v2_bootstrap_); + concurrency_, drain_time_, drain_strategy_, proxy_buffer_factory_, use_real_stats_); if (config_helper_.bootstrap().static_resources().listeners_size() > 0 && !defer_listener_finalization_) { diff --git a/test/integration/base_integration_test.h 
b/test/integration/base_integration_test.h index 18b8b02c88992..d629b0572ff35 100644 --- a/test/integration/base_integration_test.h +++ b/test/integration/base_integration_test.h @@ -9,7 +9,6 @@ #include "envoy/service/discovery/v3/discovery.pb.h" #include "source/common/config/api_version.h" -#include "source/common/config/version_converter.h" #include "source/extensions/transport_sockets/tls/context_manager_impl.h" #include "test/common/grpc/grpc_client_integration.h" @@ -143,12 +142,11 @@ class BaseIntegrationTest : protected Logger::Loggable { template void sendDiscoveryResponse(const std::string& type_url, const std::vector& state_of_the_world, const std::vector& added_or_updated, - const std::vector& removed, const std::string& version, - const bool api_downgrade = false) { + const std::vector& removed, const std::string& version) { if (sotw_or_delta_ == Grpc::SotwOrDelta::Sotw) { - sendSotwDiscoveryResponse(type_url, state_of_the_world, version, api_downgrade); + sendSotwDiscoveryResponse(type_url, state_of_the_world, version); } else { - sendDeltaDiscoveryResponse(type_url, added_or_updated, removed, version, api_downgrade); + sendDeltaDiscoveryResponse(type_url, added_or_updated, removed, version); } } @@ -178,16 +176,12 @@ class BaseIntegrationTest : protected Logger::Loggable { template void sendSotwDiscoveryResponse(const std::string& type_url, const std::vector& messages, - const std::string& version, const bool api_downgrade = false) { + const std::string& version) { envoy::service::discovery::v3::DiscoveryResponse discovery_response; discovery_response.set_version_info(version); discovery_response.set_type_url(type_url); for (const auto& message : messages) { - if (api_downgrade) { - discovery_response.add_resources()->PackFrom(API_DOWNGRADE(message)); - } else { - discovery_response.add_resources()->PackFrom(message); - } + discovery_response.add_resources()->PackFrom(message); } static int next_nonce_counter = 0; 
discovery_response.set_nonce(absl::StrCat("nonce", next_nonce_counter++)); @@ -195,21 +189,18 @@ class BaseIntegrationTest : protected Logger::Loggable { } template - void sendDeltaDiscoveryResponse(const std::string& type_url, - const std::vector& added_or_updated, - const std::vector& removed, - const std::string& version, const bool api_downgrade = false) { - sendDeltaDiscoveryResponse(type_url, added_or_updated, removed, version, xds_stream_, {}, - api_downgrade); + void + sendDeltaDiscoveryResponse(const std::string& type_url, const std::vector& added_or_updated, + const std::vector& removed, const std::string& version) { + sendDeltaDiscoveryResponse(type_url, added_or_updated, removed, version, xds_stream_, {}); } template void sendDeltaDiscoveryResponse(const std::string& type_url, const std::vector& added_or_updated, const std::vector& removed, const std::string& version, - FakeStreamPtr& stream, const std::vector& aliases = {}, - const bool api_downgrade = false) { - auto response = createDeltaDiscoveryResponse(type_url, added_or_updated, removed, version, - aliases, api_downgrade); + FakeStreamPtr& stream, const std::vector& aliases = {}) { + auto response = + createDeltaDiscoveryResponse(type_url, added_or_updated, removed, version, aliases); stream->sendGrpcMessage(response); } @@ -217,22 +208,15 @@ class BaseIntegrationTest : protected Logger::Loggable { envoy::service::discovery::v3::DeltaDiscoveryResponse createDeltaDiscoveryResponse(const std::string& type_url, const std::vector& added_or_updated, const std::vector& removed, const std::string& version, - const std::vector& aliases, - const bool api_downgrade = false) { - + const std::vector& aliases) { envoy::service::discovery::v3::DeltaDiscoveryResponse response; response.set_system_version_info("system_version_info_this_is_a_test"); response.set_type_url(type_url); for (const auto& message : added_or_updated) { auto* resource = response.add_resources(); ProtobufWkt::Any temp_any; - if 
(api_downgrade) { - temp_any.PackFrom(API_DOWNGRADE(message)); - resource->mutable_resource()->PackFrom(API_DOWNGRADE(message)); - } else { - temp_any.PackFrom(message); - resource->mutable_resource()->PackFrom(message); - } + temp_any.PackFrom(message); + resource->mutable_resource()->PackFrom(message); resource->set_name(intResourceName(message)); resource->set_version(version); for (const auto& alias : aliases) { @@ -442,9 +426,6 @@ class BaseIntegrationTest : protected Logger::Loggable { // This override exists for tests measuring stats memory. bool use_real_stats_{}; - // Use a v2 bootstrap. - bool v2_bootstrap_{false}; - private: // Configuration for the fake upstream. FakeUpstreamConfig upstream_config_{time_system_}; diff --git a/test/integration/base_overload_integration_test.cc b/test/integration/base_overload_integration_test.cc new file mode 100644 index 0000000000000..905339dd6d543 --- /dev/null +++ b/test/integration/base_overload_integration_test.cc @@ -0,0 +1,23 @@ +#include "test/integration/base_overload_integration_test.h" + +#include "test/test_common/utility.h" + +namespace Envoy { + +void BaseOverloadIntegrationTest::setupOverloadManagerConfig( + const envoy::config::overload::v3::OverloadAction& overload_action) { + const std::string overload_config = R"EOF( + refresh_interval: + seconds: 0 + nanos: 1000000 + resource_monitors: + - name: "envoy.resource_monitors.testonly.fake_resource_monitor" + typed_config: + "@type": type.googleapis.com/google.protobuf.Empty + )EOF"; + overload_manager_config_ = + TestUtility::parseYaml(overload_config); + *overload_manager_config_.add_actions() = overload_action; +} + +} // namespace Envoy diff --git a/test/integration/base_overload_integration_test.h b/test/integration/base_overload_integration_test.h new file mode 100644 index 0000000000000..8c29ac7f64aac --- /dev/null +++ b/test/integration/base_overload_integration_test.h @@ -0,0 +1,28 @@ +#pragma once + +#include 
"envoy/config/bootstrap/v3/bootstrap.pb.h" +#include "envoy/config/overload/v3/overload.pb.h" + +#include "test/integration/fake_resource_monitor.h" +#include "test/test_common/registry.h" + +namespace Envoy { + +class BaseOverloadIntegrationTest { +protected: + void + setupOverloadManagerConfig(const envoy::config::overload::v3::OverloadAction& overload_action); + + void updateResource(double pressure) { + auto* monitor = fake_resource_monitor_factory_.monitor(); + ASSERT(monitor != nullptr); + monitor->setResourcePressure(pressure); + } + + envoy::config::overload::v3::OverloadManager overload_manager_config_; + FakeResourceMonitorFactory fake_resource_monitor_factory_; + Registry::InjectFactory inject_factory_{ + fake_resource_monitor_factory_}; +}; + +} // namespace Envoy diff --git a/test/integration/buffer_accounting_integration_test.cc b/test/integration/buffer_accounting_integration_test.cc index b11c916e568be..4543850d83aa4 100644 --- a/test/integration/buffer_accounting_integration_test.cc +++ b/test/integration/buffer_accounting_integration_test.cc @@ -8,6 +8,8 @@ #include "source/common/buffer/buffer_impl.h" #include "test/integration/autonomous_upstream.h" +#include "test/integration/base_overload_integration_test.h" +#include "test/integration/http_protocol_integration.h" #include "test/integration/tracked_watermark_buffer.h" #include "test/integration/utility.h" #include "test/mocks/http/mocks.h" @@ -20,20 +22,49 @@ namespace Envoy { namespace { -std::string ipVersionAndBufferAccountingTestParamsToString( - const ::testing::TestParamInfo>& params) { - return fmt::format( - "{}_{}", - TestUtility::ipTestParamsToString(::testing::TestParamInfo( - std::get<0>(params.param), params.index)), - std::get<1>(params.param) ? 
"with_per_stream_buffer_accounting" - : "without_per_stream_buffer_accounting"); + +std::string protocolTestParamsAndBoolToString( + const ::testing::TestParamInfo>& params) { + return fmt::format("{}_{}", + HttpProtocolIntegrationTest::protocolTestParamsToString( + ::testing::TestParamInfo(std::get<0>(params.param), + /*an_index=*/0)), + std::get<1>(params.param) ? "with_per_stream_buffer_accounting" + : "without_per_stream_buffer_accounting"); +} + +void runOnWorkerThreadsAndWaitforCompletion(Server::Instance& server, std::function func) { + absl::Notification done_notification; + ThreadLocal::TypedSlotPtr<> slot; + Envoy::Thread::ThreadId main_tid; + server.dispatcher().post([&] { + slot = ThreadLocal::TypedSlot<>::makeUnique(server.threadLocal()); + slot->set( + [](Envoy::Event::Dispatcher&) -> std::shared_ptr { + return nullptr; + }); + + main_tid = server.api().threadFactory().currentThreadId(); + + slot->runOnAllThreads( + [main_tid, &server, &func](OptRef) { + // Run on the worker thread. + if (server.api().threadFactory().currentThreadId() != main_tid) { + func(); + } + }, + [&slot, &done_notification] { + slot.reset(nullptr); + done_notification.Notify(); + }); + }); + done_notification.WaitForNotification(); } } // namespace -class HttpBufferWatermarksTest +class Http2BufferWatermarksTest : public SocketInterfaceSwap, - public testing::TestWithParam>, + public testing::TestWithParam>, public HttpIntegrationTest { public: std::vector @@ -54,20 +85,26 @@ class HttpBufferWatermarksTest return responses; } - // TODO(kbaichoo): Parameterize on the client codec type when other protocols - // (H1, H3) support buffer accounting. - HttpBufferWatermarksTest() - : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, std::get<0>(GetParam())) { - config_helper_.addRuntimeOverride("envoy.test_only.per_stream_buffer_accounting", - streamBufferAccounting() ? 
"true" : "false"); + Http2BufferWatermarksTest() + : HttpIntegrationTest( + std::get<0>(GetParam()).downstream_protocol, std::get<0>(GetParam()).version, + ConfigHelper::httpProxyConfig( + /*downstream_is_quic=*/std::get<0>(GetParam()).downstream_protocol == + Http::CodecType::HTTP3)) { + if (streamBufferAccounting()) { + buffer_factory_ = + std::make_shared(absl::bit_width(4096u)); + } else { + buffer_factory_ = std::make_shared(); + } + setServerBufferFactory(buffer_factory_); - setDownstreamProtocol(Http::CodecClient::Type::HTTP2); - setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); + setUpstreamProtocol(std::get<0>(GetParam()).upstream_protocol); } protected: - std::shared_ptr buffer_factory_ = - std::make_shared(); + // For testing purposes, track >= 4096B accounts. + std::shared_ptr buffer_factory_; bool streamBufferAccounting() { return std::get<1>(GetParam()); } @@ -91,14 +128,20 @@ class HttpBufferWatermarksTest } }; +// Run the tests using HTTP2 only since its the only protocol that's fully +// supported. +// TODO(kbaichoo): Instantiate with H3 and H1 as well when their buffers are +// bounded to accounts. INSTANTIATE_TEST_SUITE_P( - IpVersions, HttpBufferWatermarksTest, - testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), testing::Bool()), - ipVersionAndBufferAccountingTestParamsToString); + IpVersions, Http2BufferWatermarksTest, + testing::Combine(testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams( + {Http::CodecType::HTTP2}, {FakeHttpConnection::Type::HTTP2})), + testing::Bool()), + protocolTestParamsAndBoolToString); // We should create four buffers each billing the same downstream request's // account which originated the chain. 
-TEST_P(HttpBufferWatermarksTest, ShouldCreateFourBuffersPerAccount) { +TEST_P(Http2BufferWatermarksTest, ShouldCreateFourBuffersPerAccount) { FakeStreamPtr upstream_request1; FakeStreamPtr upstream_request2; default_request_headers_.setContentLength(1000); @@ -153,7 +196,7 @@ TEST_P(HttpBufferWatermarksTest, ShouldCreateFourBuffersPerAccount) { EXPECT_TRUE(buffer_factory_->waitUntilExpectedNumberOfAccountsAndBoundBuffers(0, 0)); } -TEST_P(HttpBufferWatermarksTest, ShouldTrackAllocatedBytesToUpstream) { +TEST_P(Http2BufferWatermarksTest, ShouldTrackAllocatedBytesToUpstream) { const int num_requests = 5; const uint32_t request_body_size = 4096; const uint32_t response_body_size = 4096; @@ -188,7 +231,7 @@ TEST_P(HttpBufferWatermarksTest, ShouldTrackAllocatedBytesToUpstream) { } } -TEST_P(HttpBufferWatermarksTest, ShouldTrackAllocatedBytesToDownstream) { +TEST_P(Http2BufferWatermarksTest, ShouldTrackAllocatedBytesToDownstream) { const int num_requests = 5; const uint32_t request_body_size = 4096; const uint32_t response_body_size = 16384; @@ -224,4 +267,348 @@ TEST_P(HttpBufferWatermarksTest, ShouldTrackAllocatedBytesToDownstream) { } } +// Focuses on tests using the various codec. Currently, the accounting is only +// fully wired through with H2, but it's important to test that H1 and H3 end +// up notifying the BufferMemoryAccount when the dtor of the downstream stream +// occurs. 
+class ProtocolsBufferWatermarksTest + : public testing::TestWithParam>, + public HttpIntegrationTest { +public: + ProtocolsBufferWatermarksTest() + : HttpIntegrationTest( + std::get<0>(GetParam()).downstream_protocol, std::get<0>(GetParam()).version, + ConfigHelper::httpProxyConfig( + /*downstream_is_quic=*/std::get<0>(GetParam()).downstream_protocol == + Http::CodecType::HTTP3)) { + if (streamBufferAccounting()) { + buffer_factory_ = + std::make_shared(absl::bit_width(4096u)); + } else { + buffer_factory_ = std::make_shared(); + } + setServerBufferFactory(buffer_factory_); + setUpstreamProtocol(std::get<0>(GetParam()).upstream_protocol); + } + +protected: + std::shared_ptr buffer_factory_; + + bool streamBufferAccounting() { return std::get<1>(GetParam()); } +}; + +INSTANTIATE_TEST_SUITE_P( + IpVersions, ProtocolsBufferWatermarksTest, + testing::Combine(testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams( + {Http::CodecType::HTTP1, Http::CodecType::HTTP2, Http::CodecType::HTTP3}, + {FakeHttpConnection::Type::HTTP2})), + testing::Bool()), + protocolTestParamsAndBoolToString); + +TEST_P(ProtocolsBufferWatermarksTest, AccountShouldBeRegisteredAndUnregisteredOnce) { + FakeStreamPtr upstream_request1; + default_request_headers_.setContentLength(1000); + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + // Sends the first request. 
+ auto response1 = codec_client_->makeRequestWithBody(default_request_headers_, 1000); + waitForNextUpstreamRequest(); + upstream_request1 = std::move(upstream_request_); + + if (streamBufferAccounting()) { + EXPECT_EQ(buffer_factory_->numAccountsCreated(), 1); + } else { + EXPECT_EQ(buffer_factory_->numAccountsCreated(), 0); + } + + upstream_request1->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + upstream_request1->encodeData(1000, true); + ASSERT_TRUE(response1->waitForEndStream()); + ASSERT_TRUE(upstream_request1->complete()); + + // Check single call to unregister if stream account, 0 otherwise + if (streamBufferAccounting()) { + EXPECT_TRUE(buffer_factory_->waitForExpectedAccountUnregistered(1)); + } else { + EXPECT_TRUE(buffer_factory_->waitForExpectedAccountUnregistered(0)); + } +} + +TEST_P(ProtocolsBufferWatermarksTest, ResettingStreamUnregistersAccount) { + FakeStreamPtr upstream_request1; + default_request_headers_.setContentLength(1000); + // H1 on RST ends up leveraging idle timeout if no active stream on the + // connection. + config_helper_.setDownstreamHttpIdleTimeout(std::chrono::milliseconds(100)); + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + // Sends the first request. + auto response1 = codec_client_->makeRequestWithBody(default_request_headers_, 1000); + waitForNextUpstreamRequest(); + upstream_request1 = std::move(upstream_request_); + + if (streamBufferAccounting()) { + EXPECT_EQ(buffer_factory_->numAccountsCreated(), 1); + } else { + EXPECT_EQ(buffer_factory_->numAccountsCreated(), 0); + } + + if (streamBufferAccounting()) { + // Reset the downstream via the account interface on the worker thread. + EXPECT_EQ(buffer_factory_->numAccountsCreated(), 1); + Buffer::BufferMemoryAccountSharedPtr account; + auto& server = test_server_->server(); + + // Get access to the account. 
+ buffer_factory_->inspectAccounts( + [&account](Buffer::TrackedWatermarkBufferFactory::AccountToBoundBuffersMap& map) { + for (auto& [acct, _] : map) { + account = acct; + } + }, + server); + + // Reset the stream from the worker. + runOnWorkerThreadsAndWaitforCompletion(server, [&account]() { account->resetDownstream(); }); + + if (std::get<0>(GetParam()).downstream_protocol == Http::CodecType::HTTP1) { + // For H1, we use idleTimeouts to cancel streams unless there was an + // explicit protocol error prior to sending a response to the downstream. + // Since that's not the case, the reset will fire twice, once due to + // overload manager, and once due to timeout which will close the + // connection. + ASSERT_TRUE(codec_client_->waitForDisconnect(std::chrono::milliseconds(10000))); + } else { + ASSERT_TRUE(response1->waitForReset()); + EXPECT_EQ(response1->resetReason(), Http::StreamResetReason::RemoteReset); + } + + // Wait for the upstream request to receive the reset to avoid a race when + // cleaning up the test. 
+ ASSERT_TRUE(upstream_request1->waitForReset()); + } else { + upstream_request1->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + upstream_request1->encodeData(1000, true); + ASSERT_TRUE(response1->waitForEndStream()); + ASSERT_TRUE(upstream_request1->complete()); + } + + // Check single call to unregister if stream account, 0 otherwise + if (streamBufferAccounting()) { + EXPECT_TRUE(buffer_factory_->waitForExpectedAccountUnregistered(1)); + } else { + EXPECT_TRUE(buffer_factory_->waitForExpectedAccountUnregistered(0)); + } +} + +class Http2OverloadManagerIntegrationTest : public Http2BufferWatermarksTest, + public Envoy::BaseOverloadIntegrationTest { +protected: + void initializeOverloadManagerInBootstrap( + const envoy::config::overload::v3::OverloadAction& overload_action) { + setupOverloadManagerConfig(overload_action); + overload_manager_config_.mutable_buffer_factory_config() + ->set_minimum_account_to_track_power_of_two(absl::bit_width(4096u)); + config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + *bootstrap.mutable_overload_manager() = this->overload_manager_config_; + }); + } +}; + +// Run the tests using HTTP2 only since its the only protocol that's fully +// supported. +// TODO(kbaichoo): Instantiate with H3 and H1 as well when their buffers are +// bounded to accounts. 
+INSTANTIATE_TEST_SUITE_P( + IpVersions, Http2OverloadManagerIntegrationTest, + testing::Combine(testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams( + {Http::CodecType::HTTP2}, {FakeHttpConnection::Type::HTTP2})), + testing::Bool()), + protocolTestParamsAndBoolToString); + +TEST_P(Http2OverloadManagerIntegrationTest, + ResetsExpensiveStreamsWhenUpstreamBuffersTakeTooMuchSpaceAndOverloaded) { + autonomous_upstream_ = true; + autonomous_allow_incomplete_streams_ = true; + initializeOverloadManagerInBootstrap( + TestUtility::parseYaml(R"EOF( + name: "envoy.overload_actions.reset_high_memory_stream" + triggers: + - name: "envoy.resource_monitors.testonly.fake_resource_monitor" + scaled: + scaling_threshold: 0.90 + saturation_threshold: 0.98 + )EOF")); + initialize(); + + // Makes us have Envoy's writes to upstream return EAGAIN + writev_matcher_->setDestinationPort(fake_upstreams_[0]->localAddress()->ip()->port()); + writev_matcher_->setWritevReturnsEgain(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + auto smallest_request_response = std::move(sendRequests(1, 4096, 4096)[0]); + auto medium_request_response = std::move(sendRequests(1, 4096 * 2, 4096)[0]); + auto largest_request_response = std::move(sendRequests(1, 4096 * 4, 4096)[0]); + + // Wait for requests to come into Envoy. + EXPECT_TRUE(buffer_factory_->waitUntilTotalBufferedExceeds(7 * 4096)); + + // Set the pressure so the overload action kicks in + updateResource(0.95); + test_server_->waitForGaugeEq( + "overload.envoy.overload_actions.reset_high_memory_stream.scale_percent", 62); + + // Wait for the proxy to notice and take action for the overload by only + // resetting the largest stream. 
+ if (streamBufferAccounting()) { + test_server_->waitForCounterGe("http.config_test.downstream_rq_rx_reset", 1); + test_server_->waitForCounterGe("envoy.overload_actions.reset_high_memory_stream.count", 1); + EXPECT_TRUE(largest_request_response->waitForReset()); + EXPECT_TRUE(largest_request_response->reset()); + + ASSERT_FALSE(medium_request_response->complete()); + } + + // Increase resource pressure to reset the medium request + updateResource(0.96); + + // Wait for the proxy to notice and take action for the overload. + if (streamBufferAccounting()) { + test_server_->waitForCounterGe("http.config_test.downstream_rq_rx_reset", 2); + test_server_->waitForCounterGe("envoy.overload_actions.reset_high_memory_stream.count", 2); + EXPECT_TRUE(medium_request_response->waitForReset()); + EXPECT_TRUE(medium_request_response->reset()); + + ASSERT_FALSE(smallest_request_response->complete()); + } + + // Reduce resource pressure + updateResource(0.80); + test_server_->waitForGaugeEq( + "overload.envoy.overload_actions.reset_high_memory_stream.scale_percent", 0); + + // Resume writes to upstream, any request streams that survive can go through. + writev_matcher_->setResumeWrites(); + + if (!streamBufferAccounting()) { + // If we're not doing the accounting, we didn't end up resetting these + // streams. 
+ ASSERT_TRUE(largest_request_response->waitForEndStream()); + ASSERT_TRUE(largest_request_response->complete()); + EXPECT_EQ(largest_request_response->headers().getStatusValue(), "200"); + + ASSERT_TRUE(medium_request_response->waitForEndStream()); + ASSERT_TRUE(medium_request_response->complete()); + EXPECT_EQ(medium_request_response->headers().getStatusValue(), "200"); + } + + ASSERT_TRUE(smallest_request_response->waitForEndStream()); + ASSERT_TRUE(smallest_request_response->complete()); + EXPECT_EQ(smallest_request_response->headers().getStatusValue(), "200"); +} + +TEST_P(Http2OverloadManagerIntegrationTest, + ResetsExpensiveStreamsWhenDownstreamBuffersTakeTooMuchSpaceAndOverloaded) { + initializeOverloadManagerInBootstrap( + TestUtility::parseYaml(R"EOF( + name: "envoy.overload_actions.reset_high_memory_stream" + triggers: + - name: "envoy.resource_monitors.testonly.fake_resource_monitor" + scaled: + scaling_threshold: 0.90 + saturation_threshold: 0.98 + )EOF")); + initialize(); + + // Makes us have Envoy's writes to downstream return EAGAIN + writev_matcher_->setSourcePort(lookupPort("http")); + codec_client_ = makeHttpConnection(lookupPort("http")); + writev_matcher_->setWritevReturnsEgain(); + + auto smallest_response = std::move(sendRequests(1, 10, 4096)[0]); + waitForNextUpstreamRequest(); + FakeStreamPtr upstream_request_for_smallest_response = std::move(upstream_request_); + + auto medium_response = std::move(sendRequests(1, 20, 4096 * 2)[0]); + waitForNextUpstreamRequest(); + FakeStreamPtr upstream_request_for_medium_response = std::move(upstream_request_); + + auto largest_response = std::move(sendRequests(1, 30, 4096 * 4)[0]); + waitForNextUpstreamRequest(); + FakeStreamPtr upstream_request_for_largest_response = std::move(upstream_request_); + + // Send the responses back, without yet ending the stream. 
+ upstream_request_for_largest_response->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + upstream_request_for_largest_response->encodeData(4096 * 4, false); + + upstream_request_for_medium_response->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + upstream_request_for_medium_response->encodeData(4096 * 2, false); + + upstream_request_for_smallest_response->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + upstream_request_for_smallest_response->encodeData(4096, false); + + // Wait for the responses to come back + EXPECT_TRUE(buffer_factory_->waitUntilTotalBufferedExceeds(7 * 4096)); + + // Set the pressure so the overload action kills largest response + updateResource(0.95); + test_server_->waitForGaugeEq( + "overload.envoy.overload_actions.reset_high_memory_stream.scale_percent", 62); + if (streamBufferAccounting()) { + test_server_->waitForCounterGe("http.config_test.downstream_rq_rx_reset", 1); + test_server_->waitForCounterGe("envoy.overload_actions.reset_high_memory_stream.count", 1); + ASSERT_TRUE(upstream_request_for_largest_response->waitForReset()); + } + + // Set the pressure so the overload action kills medium response + updateResource(0.96); + if (streamBufferAccounting()) { + test_server_->waitForCounterGe("http.config_test.downstream_rq_rx_reset", 2); + test_server_->waitForCounterGe("envoy.overload_actions.reset_high_memory_stream.count", 2); + ASSERT_TRUE(upstream_request_for_medium_response->waitForReset()); + } + + // Reduce resource pressure + updateResource(0.80); + test_server_->waitForGaugeEq( + "overload.envoy.overload_actions.reset_high_memory_stream.scale_percent", 0); + + // Resume writes to downstream, any responses that survive can go through. 
+ writev_matcher_->setResumeWrites(); + + if (streamBufferAccounting()) { + EXPECT_TRUE(largest_response->waitForReset()); + EXPECT_TRUE(largest_response->reset()); + + EXPECT_TRUE(medium_response->waitForReset()); + EXPECT_TRUE(medium_response->reset()); + + } else { + // If we're not doing the accounting, we didn't end up resetting these + // streams. Finish sending data. + upstream_request_for_largest_response->encodeData(100, true); + upstream_request_for_medium_response->encodeData(100, true); + ASSERT_TRUE(largest_response->waitForEndStream()); + ASSERT_TRUE(largest_response->complete()); + EXPECT_EQ(largest_response->headers().getStatusValue(), "200"); + + ASSERT_TRUE(medium_response->waitForEndStream()); + ASSERT_TRUE(medium_response->complete()); + EXPECT_EQ(medium_response->headers().getStatusValue(), "200"); + } + + // Have the smallest response finish. + upstream_request_for_smallest_response->encodeData(100, true); + ASSERT_TRUE(smallest_response->waitForEndStream()); + ASSERT_TRUE(smallest_response->complete()); + EXPECT_EQ(smallest_response->headers().getStatusValue(), "200"); +} + } // namespace Envoy diff --git a/test/integration/cds_integration_test.cc b/test/integration/cds_integration_test.cc index 4b62e426f0215..3da9db2c5e897 100644 --- a/test/integration/cds_integration_test.cc +++ b/test/integration/cds_integration_test.cc @@ -8,6 +8,7 @@ #include "source/common/protobuf/utility.h" #include "test/common/grpc/grpc_client_integration.h" +#include "test/config/v2_link_hacks.h" #include "test/integration/http_integration.h" #include "test/integration/utility.h" #include "test/test_common/network_utility.h" diff --git a/test/integration/clusters/BUILD b/test/integration/clusters/BUILD index 2279cb450a9d4..79173a158252a 100644 --- a/test/integration/clusters/BUILD +++ b/test/integration/clusters/BUILD @@ -32,6 +32,7 @@ envoy_cc_test_library( "//source/extensions/transport_sockets/raw_buffer:config", "//source/server:transport_socket_config_lib", 
"//test/common/upstream:utility_lib", + "//test/integration/load_balancers:custom_lb_policy", "//test/test_common:registry_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", diff --git a/test/integration/clusters/custom_static_cluster.cc b/test/integration/clusters/custom_static_cluster.cc index a3f4397ee982d..709ea07c9b7a1 100644 --- a/test/integration/clusters/custom_static_cluster.cc +++ b/test/integration/clusters/custom_static_cluster.cc @@ -4,6 +4,8 @@ #include "envoy/config/core/v3/health_check.pb.h" #include "envoy/config/endpoint/v3/endpoint_components.pb.h" +#include "test/integration/load_balancers/custom_lb_policy.h" + namespace Envoy { // ClusterImplBase diff --git a/test/integration/clusters/custom_static_cluster.h b/test/integration/clusters/custom_static_cluster.h index 8b91386ac0d4f..5570338047634 100644 --- a/test/integration/clusters/custom_static_cluster.h +++ b/test/integration/clusters/custom_static_cluster.h @@ -35,38 +35,6 @@ class CustomStaticCluster : public Upstream::ClusterImplBase { InitializePhase initializePhase() const override { return InitializePhase::Primary; } private: - struct LbImpl : public Upstream::LoadBalancer { - LbImpl(const Upstream::HostSharedPtr& host) : host_(host) {} - - Upstream::HostConstSharedPtr chooseHost(Upstream::LoadBalancerContext*) override { - return host_; - } - Upstream::HostConstSharedPtr peekAnotherHost(Upstream::LoadBalancerContext*) override { - return nullptr; - } - - const Upstream::HostSharedPtr host_; - }; - - struct LbFactory : public Upstream::LoadBalancerFactory { - LbFactory(const Upstream::HostSharedPtr& host) : host_(host) {} - - Upstream::LoadBalancerPtr create() override { return std::make_unique(host_); } - - const Upstream::HostSharedPtr host_; - }; - - struct ThreadAwareLbImpl : public Upstream::ThreadAwareLoadBalancer { - ThreadAwareLbImpl(const Upstream::HostSharedPtr& host) : host_(host) {} - - Upstream::LoadBalancerFactorySharedPtr factory() 
override { - return std::make_shared(host_); - } - void initialize() override {} - - const Upstream::HostSharedPtr host_; - }; - Upstream::ThreadAwareLoadBalancerPtr threadAwareLb(); // ClusterImplBase diff --git a/test/integration/eds_integration_test.cc b/test/integration/eds_integration_test.cc index 455f1f39e7337..2f2b47d69302d 100644 --- a/test/integration/eds_integration_test.cc +++ b/test/integration/eds_integration_test.cc @@ -91,7 +91,9 @@ class EdsIntegrationTest : public testing::TestWithParam cluster_modifier) { setUpstreamCount(4); if (codec_client_type_ == envoy::type::v3::HTTP2) { setUpstreamProtocol(Http::CodecType::HTTP2); @@ -131,11 +133,17 @@ class EdsIntegrationTest : public testing::TestWithParammutable_http_health_check()->set_codec_client_type(codec_client_type_); } setEndpoints(0, 0, 0, true, absl::nullopt, false); + + if (cluster_modifier != nullptr) { + cluster_modifier(cluster_); + } cds_helper_.setCds({cluster_}); initialize(); test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 0); } + void initializeTest(bool http_active_hc) { initializeTest(http_active_hc, nullptr); } + envoy::type::v3::CodecClientType codec_client_type_{}; EdsHelper eds_helper_; CdsHelper cds_helper_; @@ -234,6 +242,30 @@ TEST_P(EdsIntegrationTest, RemoveAfterHcFail) { EXPECT_EQ(0, test_server_->gauge("cluster.cluster_0.membership_total")->value()); } +// Verifies that cluster warming proceeds even if a host is deleted before health checks complete. +// This is a regression test for https://github.com/envoyproxy/envoy/issues/17836. 
+TEST_P(EdsIntegrationTest, FinishWarmingIgnoreHealthCheck) { + codec_client_type_ = envoy::type::v3::HTTP2; + initializeTest(true, [](envoy::config::cluster::v3::Cluster& cluster) { + cluster.set_ignore_health_on_host_removal(true); + }); + setEndpoints(1, 0, 0, false); + EXPECT_EQ(1, test_server_->gauge("cluster.cluster_0.membership_total")->value()); + EXPECT_EQ(0, test_server_->gauge("cluster.cluster_0.membership_healthy")->value()); + EXPECT_EQ(0, test_server_->gauge("cluster_manager.warming_clusters")->value()); + + // Trigger a CDS update. This should cause a new cluster to require warming, blocked on the host + // being health checked. + cluster_.mutable_circuit_breakers()->add_thresholds()->mutable_max_connections()->set_value(100); + cds_helper_.setCds({cluster_}); + test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 1); + + // Clear out the host before the health check finishes (regardless of success/error/timeout) and + // ensure that warming_clusters goes to 0 to avoid a permanent warming state. + setEndpoints(0, 0, 0, true, absl::nullopt, false); + test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 0); +} + // Verifies that endpoints are ignored until health checked when configured to. 
TEST_P(EdsIntegrationTest, EndpointWarmingSuccessfulHc) { cluster_.mutable_common_lb_config()->set_ignore_new_hosts_until_first_hc(true); diff --git a/test/integration/extension_discovery_integration_test.cc b/test/integration/extension_discovery_integration_test.cc index ded9093dccbb9..7a6457027fcc1 100644 --- a/test/integration/extension_discovery_integration_test.cc +++ b/test/integration/extension_discovery_integration_test.cc @@ -4,6 +4,7 @@ #include "envoy/service/extension/v3/config_discovery.pb.h" #include "test/common/grpc/grpc_client_integration.h" +#include "test/config/v2_link_hacks.h" #include "test/integration/filters/set_is_terminal_filter_config.pb.h" #include "test/integration/filters/set_response_code_filter_config.pb.h" #include "test/integration/http_integration.h" @@ -30,7 +31,7 @@ std::string denyPrivateConfigWithMatcher() { "@type": type.googleapis.com/test.integration.filters.SetResponseCodeFilterConfig prefix: "/private" code: 403 - matcher: + xds_matcher: matcher_tree: input: name: request-headers @@ -84,7 +85,7 @@ class ExtensionDiscoveryIntegrationTest : public Grpc::GrpcClientIntegrationPara typed_config: "@type": type.googleapis.com/test.integration.filters.SetResponseCodeFilterConfig code: 403 - matcher: + xds_matcher: matcher_tree: input: name: request-headers diff --git a/test/integration/fake_resource_monitor.cc b/test/integration/fake_resource_monitor.cc new file mode 100644 index 0000000000000..9dc8e4b0060ab --- /dev/null +++ b/test/integration/fake_resource_monitor.cc @@ -0,0 +1,24 @@ +#include "test/integration/fake_resource_monitor.h" + +namespace Envoy { + +FakeResourceMonitor::~FakeResourceMonitor() { factory_.onMonitorDestroyed(this); } + +void FakeResourceMonitor::updateResourceUsage(Callbacks& callbacks) { + Server::ResourceUsage usage; + usage.resource_pressure_ = pressure_; + callbacks.onSuccess(usage); +} + +void FakeResourceMonitorFactory::onMonitorDestroyed(FakeResourceMonitor* monitor) { + ASSERT(monitor_ == 
monitor); + monitor_ = nullptr; +} +Server::ResourceMonitorPtr FakeResourceMonitorFactory::createResourceMonitor( + const Protobuf::Message&, Server::Configuration::ResourceMonitorFactoryContext& context) { + auto monitor = std::make_unique(context.dispatcher(), *this); + monitor_ = monitor.get(); + return monitor; +} + +} // namespace Envoy diff --git a/test/integration/fake_resource_monitor.h b/test/integration/fake_resource_monitor.h new file mode 100644 index 0000000000000..84d40eb70dcb9 --- /dev/null +++ b/test/integration/fake_resource_monitor.h @@ -0,0 +1,51 @@ +#pragma once + +#include "envoy/server/resource_monitor.h" +#include "envoy/server/resource_monitor_config.h" + +#include "test/common/config/dummy_config.pb.h" + +namespace Envoy { + +class FakeResourceMonitorFactory; + +class FakeResourceMonitor : public Server::ResourceMonitor { +public: + FakeResourceMonitor(Event::Dispatcher& dispatcher, FakeResourceMonitorFactory& factory) + : dispatcher_(dispatcher), factory_(factory), pressure_(0.0) {} + // Server::ResourceMonitor + ~FakeResourceMonitor() override; + void updateResourceUsage(Callbacks& callbacks) override; + + void setResourcePressure(double pressure) { + dispatcher_.post([this, pressure] { pressure_ = pressure; }); + } + +private: + Event::Dispatcher& dispatcher_; + FakeResourceMonitorFactory& factory_; + double pressure_; +}; + +class FakeResourceMonitorFactory : public Server::Configuration::ResourceMonitorFactory { +public: + // Server::Configuration::ResourceMonitorFactory + Server::ResourceMonitorPtr + createResourceMonitor(const Protobuf::Message& config, + Server::Configuration::ResourceMonitorFactoryContext& context) override; + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return std::make_unique(); + } + std::string name() const override { + return "envoy.resource_monitors.testonly.fake_resource_monitor"; + } + + FakeResourceMonitor* monitor() const { return monitor_; } + void 
onMonitorDestroyed(FakeResourceMonitor* monitor); + +private: + FakeResourceMonitor* monitor_{nullptr}; +}; + +} // namespace Envoy diff --git a/test/integration/fake_upstream.cc b/test/integration/fake_upstream.cc index d0dd2ee4a41a2..9ba6ef3333ac0 100644 --- a/test/integration/fake_upstream.cc +++ b/test/integration/fake_upstream.cc @@ -92,7 +92,7 @@ void FakeStream::encodeHeaders(const Http::HeaderMap& headers, bool end_stream) Http::createHeaderMap(headers)); if (add_served_by_header_) { headers_copy->addCopy(Http::LowerCaseString("x-served-by"), - parent_.connection().addressProvider().localAddress()->asString()); + parent_.connection().connectionInfoProvider().localAddress()->asString()); } postToConnectionThread([this, headers_copy, end_stream]() -> void { @@ -284,8 +284,12 @@ AssertionResult FakeStream::waitForReset(milliseconds timeout) { return AssertionSuccess(); } -void FakeStream::startGrpcStream() { - encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); +void FakeStream::startGrpcStream(bool send_headers) { + ASSERT(!grpc_stream_started_, "gRPC stream should not be started more than once"); + grpc_stream_started_ = true; + if (send_headers) { + encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + } } void FakeStream::finishGrpcStream(Grpc::Status::GrpcStatus status) { diff --git a/test/integration/fake_upstream.h b/test/integration/fake_upstream.h index 2758cbad861c2..a64ba2dab4b15 100644 --- a/test/integration/fake_upstream.h +++ b/test/integration/fake_upstream.h @@ -147,9 +147,11 @@ class FakeStream : public Http::RequestDecoder, waitForReset(std::chrono::milliseconds timeout = TestUtility::DefaultTimeout); // gRPC convenience methods. 
- void startGrpcStream(); + void startGrpcStream(bool send_headers = true); void finishGrpcStream(Grpc::Status::GrpcStatus status); template void sendGrpcMessage(const T& message) { + ASSERT(grpc_stream_started_, + "start gRPC stream by calling startGrpcStream before sending a message"); auto serialized_response = Grpc::Common::serializeToGrpcFrame(message); encodeData(*serialized_response, false); ENVOY_LOG(debug, "Sent gRPC message: {}", message.DebugString()); @@ -249,6 +251,7 @@ class FakeStream : public Http::RequestDecoder, absl::node_hash_map duplicated_metadata_key_count_; std::unique_ptr stream_info_; bool received_data_{false}; + bool grpc_stream_started_{false}; }; using FakeStreamPtr = std::unique_ptr; @@ -617,7 +620,7 @@ class FakeUpstream : Logger::Loggable, waitForRawConnection(FakeRawConnectionPtr& connection, std::chrono::milliseconds timeout = TestUtility::DefaultTimeout); Network::Address::InstanceConstSharedPtr localAddress() const { - return socket_->addressProvider().localAddress(); + return socket_->connectionInfoProvider().localAddress(); } virtual std::unique_ptr @@ -702,7 +705,7 @@ class FakeUpstream : Logger::Loggable, // Network::ListenSocketFactory Network::Socket::Type socketType() const override { return socket_->socketType(); } const Network::Address::InstanceConstSharedPtr& localAddress() const override { - return socket_->addressProvider().localAddress(); + return socket_->connectionInfoProvider().localAddress(); } Network::SocketSharedPtr getListenSocket(uint32_t) override { return socket_; } Network::ListenSocketFactoryPtr clone() const override { return nullptr; } diff --git a/test/integration/filters/BUILD b/test/integration/filters/BUILD index dc129a66cf181..c8e796b6c89a0 100644 --- a/test/integration/filters/BUILD +++ b/test/integration/filters/BUILD @@ -9,18 +9,24 @@ licenses(["notice"]) # Apache 2 envoy_package() +envoy_proto_library( + name = "add_body_filter_proto", + srcs = ["add_body_filter.proto"], +) + 
envoy_cc_test_library( name = "add_body_filter_config_lib", srcs = [ "add_body_filter.cc", ], deps = [ + ":add_body_filter_proto_cc_proto", ":common_lib", "//envoy/http:filter_interface", "//envoy/registry", "//envoy/server:filter_config_interface", + "//source/extensions/filters/http/common:factory_base_lib", "//source/extensions/filters/http/common:pass_through_filter_lib", - "//test/extensions/filters/http/common:empty_http_filter_config_lib", ], ) @@ -69,6 +75,27 @@ envoy_cc_test_library( ], ) +envoy_proto_library( + name = "crash_filter_proto", + srcs = ["crash_filter.proto"], +) + +envoy_cc_test_library( + name = "crash_filter_config_lib", + srcs = [ + "crash_filter.cc", + ], + deps = [ + ":common_lib", + ":crash_filter_proto_cc_proto", + "//envoy/http:filter_interface", + "//envoy/registry", + "//envoy/server:filter_config_interface", + "//source/extensions/filters/http/common:factory_base_lib", + "//source/extensions/filters/http/common:pass_through_filter_lib", + ], +) + envoy_cc_test_library( name = "local_reply_with_metadata_filter_lib", srcs = [ diff --git a/test/integration/filters/add_body_filter.cc b/test/integration/filters/add_body_filter.cc index 0f8b968e1417a..e6b101032ab8b 100644 --- a/test/integration/filters/add_body_filter.cc +++ b/test/integration/filters/add_body_filter.cc @@ -5,26 +5,49 @@ #include "envoy/server/filter_config.h" #include "source/common/buffer/buffer_impl.h" +#include "source/extensions/filters/http/common/factory_base.h" #include "source/extensions/filters/http/common/pass_through_filter.h" -#include "test/extensions/filters/http/common/empty_http_filter_config.h" +#include "test/integration/filters/add_body_filter.pb.h" +#include "test/integration/filters/add_body_filter.pb.validate.h" #include "test/integration/filters/common.h" namespace Envoy { +class AddBodyFilterConfig { +public: + AddBodyFilterConfig( + test::integration::filters::AddBodyFilterConfig::FilterCallback where_to_add_body, + uint32_t body_size, + 
test::integration::filters::AddBodyFilterConfig::FilterCallback where_to_stop_and_buffer) + : where_to_add_body_(where_to_add_body), body_size_(body_size), + where_to_stop_and_buffer_(where_to_stop_and_buffer) {} + + const test::integration::filters::AddBodyFilterConfig::FilterCallback where_to_add_body_; + const uint32_t body_size_; + const test::integration::filters::AddBodyFilterConfig::FilterCallback where_to_stop_and_buffer_; +}; + // A test filter that adds body data to a request/response without body payload. class AddBodyStreamFilter : public Http::PassThroughFilter { public: - constexpr static char name[] = "add-body-filter"; + AddBodyStreamFilter(std::shared_ptr config) : config_(config) {} Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers, bool end_stream) override { - if (end_stream) { - Buffer::OwnedImpl body("body"); + if (config_->where_to_add_body_ == test::integration::filters::AddBodyFilterConfig::DEFAULT) { + if (end_stream) { + Buffer::OwnedImpl body("body"); + headers.setContentLength(body.length()); + decoder_callbacks_->addDecodedData(body, false); + } else { + headers.removeContentLength(); + } + } else if (config_->where_to_add_body_ == + test::integration::filters::AddBodyFilterConfig::DECODE_HEADERS) { + Buffer::OwnedImpl body(std::string(config_->body_size_, 'a')); headers.setContentLength(body.length()); decoder_callbacks_->addDecodedData(body, false); - } else { - headers.removeContentLength(); } return Http::FilterHeadersStatus::Continue; @@ -34,37 +57,90 @@ class AddBodyStreamFilter : public Http::PassThroughFilter { // Ensure that decodeData is only called for HTTP/3 (where protocol is set at the // connection level). In HTTP/3 the FIN arrives separately so we will get // decodeData() with an empty body. 
- if (end_stream && decoder_callbacks_->connection()->streamInfo().protocol() && - data.length() == 0u) { - data.add("body"); + if (config_->where_to_add_body_ == test::integration::filters::AddBodyFilterConfig::DEFAULT) { + if (end_stream && decoder_callbacks_->connection()->streamInfo().protocol() && + data.length() == 0u) { + data.add("body"); + } + } else if (config_->where_to_add_body_ == + test::integration::filters::AddBodyFilterConfig::DECODE_DATA) { + data.add(std::string(config_->body_size_, 'a')); + } + + return config_->where_to_stop_and_buffer_ == + test::integration::filters::AddBodyFilterConfig::DECODE_DATA + ? Http::FilterDataStatus::StopIterationAndBuffer + : Http::FilterDataStatus::Continue; + } + + Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap&) override { + if (config_->where_to_add_body_ == + test::integration::filters::AddBodyFilterConfig::DECODE_TRAILERS) { + Buffer::OwnedImpl body(std::string(config_->body_size_, 'a')); + decoder_callbacks_->addDecodedData(body, false); } - return Http::FilterDataStatus::Continue; + return Http::FilterTrailersStatus::Continue; } Http::FilterDataStatus encodeData(Buffer::Instance& data, bool end_stream) override { // Ensure that encodeData is only called for HTTP/3 (where protocol is set at the // connection level). In HTTP/3 the FIN arrives separately so we will get // encodeData() with an empty body. 
- ASSERT(!end_stream || decoder_callbacks_->connection()->streamInfo().protocol()); - data.add("body"); - return Http::FilterDataStatus::Continue; + if (config_->where_to_add_body_ == test::integration::filters::AddBodyFilterConfig::DEFAULT) { + if (end_stream && decoder_callbacks_->connection()->streamInfo().protocol() && + data.length() == 0) { + data.add("body"); + } + } else if (config_->where_to_add_body_ == + test::integration::filters::AddBodyFilterConfig::ENCODE_DATA) { + data.add(std::string(config_->body_size_, 'a')); + } + + return config_->where_to_stop_and_buffer_ == + test::integration::filters::AddBodyFilterConfig::ENCODE_DATA + ? Http::FilterDataStatus::StopIterationAndBuffer + : Http::FilterDataStatus::Continue; } Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap& headers, bool end_stream) override { - if (end_stream) { - Buffer::OwnedImpl body("body"); + if (config_->where_to_add_body_ == test::integration::filters::AddBodyFilterConfig::DEFAULT) { + if (end_stream) { + Buffer::OwnedImpl body("body"); + headers.setContentLength(body.length()); + encoder_callbacks_->addEncodedData(body, false); + } + } else if (config_->where_to_add_body_ == + test::integration::filters::AddBodyFilterConfig::ENCODE_HEADERS) { + Buffer::OwnedImpl body(std::string(config_->body_size_, 'a')); headers.setContentLength(body.length()); encoder_callbacks_->addEncodedData(body, false); } return Http::FilterHeadersStatus::Continue; } + +private: + const std::shared_ptr config_; }; -constexpr char AddBodyStreamFilter::name[]; +class AddBodyFilterFactory : public Extensions::HttpFilters::Common::FactoryBase< + test::integration::filters::AddBodyFilterConfig> { +public: + AddBodyFilterFactory() : FactoryBase("add-body-filter") {} + +private: + Http::FilterFactoryCb createFilterFactoryFromProtoTyped( + const test::integration::filters::AddBodyFilterConfig& proto_config, const std::string&, + Server::Configuration::FactoryContext&) override { + auto filter_config = 
std::make_shared( + proto_config.where_to_add_body(), proto_config.body_size(), + proto_config.where_to_stop_and_buffer()); + return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addStreamFilter(std::make_shared(filter_config)); + }; + } +}; -static Registry::RegisterFactory, - Server::Configuration::NamedHttpFilterConfigFactory> - encoder_register_; +REGISTER_FACTORY(AddBodyFilterFactory, Server::Configuration::NamedHttpFilterConfigFactory); } // namespace Envoy diff --git a/test/integration/filters/add_body_filter.proto b/test/integration/filters/add_body_filter.proto new file mode 100644 index 0000000000000..d6345f1610ae9 --- /dev/null +++ b/test/integration/filters/add_body_filter.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package test.integration.filters; + +import "validate/validate.proto"; + +message AddBodyFilterConfig { + enum FilterCallback { + DEFAULT = 0; + DECODE_HEADERS = 1; + DECODE_DATA = 2; + DECODE_TRAILERS = 3; + ENCODE_HEADERS = 4; + ENCODE_DATA = 5; + } + + FilterCallback where_to_add_body = 1; + uint32 body_size = 2; + FilterCallback where_to_stop_and_buffer = 3; +} diff --git a/test/integration/filters/address_restore_listener_filter.cc b/test/integration/filters/address_restore_listener_filter.cc index f0d6945019760..84cd33d2c14e7 100644 --- a/test/integration/filters/address_restore_listener_filter.cc +++ b/test/integration/filters/address_restore_listener_filter.cc @@ -16,11 +16,11 @@ class FakeOriginalDstListenerFilter : public Network::ListenerFilter { Network::FilterStatus onAccept(Network::ListenerFilterCallbacks& cb) override { FANCY_LOG(debug, "in FakeOriginalDstListenerFilter::onAccept"); Network::ConnectionSocket& socket = cb.socket(); - socket.addressProvider().restoreLocalAddress( + socket.connectionInfoProvider().restoreLocalAddress( std::make_shared("127.0.0.2", 80)); FANCY_LOG(debug, "current local socket address is {} restored = {}", - 
socket.addressProvider().localAddress()->asString(), - socket.addressProvider().localAddressRestored()); + socket.connectionInfoProvider().localAddress()->asString(), + socket.connectionInfoProvider().localAddressRestored()); return Network::FilterStatus::Continue; } }; diff --git a/test/integration/filters/crash_filter.cc b/test/integration/filters/crash_filter.cc new file mode 100644 index 0000000000000..57cf8f5531abb --- /dev/null +++ b/test/integration/filters/crash_filter.cc @@ -0,0 +1,90 @@ +#include + +#include "envoy/http/filter.h" +#include "envoy/registry/registry.h" +#include "envoy/server/filter_config.h" + +#include "source/common/buffer/buffer_impl.h" +#include "source/extensions/filters/http/common/factory_base.h" +#include "source/extensions/filters/http/common/pass_through_filter.h" + +#include "test/integration/filters/common.h" +#include "test/integration/filters/crash_filter.pb.h" +#include "test/integration/filters/crash_filter.pb.validate.h" + +namespace Envoy { + +class CrashFilterConfig { +public: + CrashFilterConfig(bool crash_in_encode_headers, bool crash_in_encode_data, + bool crash_in_decode_headers, bool crash_in_decode_data, + bool crash_in_decode_trailers) + : crash_in_encode_headers_(crash_in_encode_headers), + crash_in_encode_data_(crash_in_encode_data), + crash_in_decode_headers_(crash_in_decode_headers), + crash_in_decode_data_(crash_in_decode_data), + crash_in_decode_trailers_(crash_in_decode_trailers) {} + + const bool crash_in_encode_headers_; + const bool crash_in_encode_data_; + + const bool crash_in_decode_headers_; + const bool crash_in_decode_data_; + const bool crash_in_decode_trailers_; +}; + +// A test filter that adds body data to a request/response without body payload. 
+class CrashFilter : public Http::PassThroughFilter { +public: + CrashFilter(std::shared_ptr config) : config_(config) {} + + Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap&, bool) override { + RELEASE_ASSERT(!config_->crash_in_decode_headers_, "Crash in decodeTrailers"); + return Http::FilterHeadersStatus::Continue; + } + + Http::FilterDataStatus decodeData(Buffer::Instance&, bool) override { + RELEASE_ASSERT(!config_->crash_in_decode_data_, "Crash in decodeData"); + return Http::FilterDataStatus::Continue; + } + + Http::FilterTrailersStatus decodeTrailers(Http::RequestTrailerMap&) override { + RELEASE_ASSERT(!config_->crash_in_decode_trailers_, "Crash in decodeTrailers"); + return Http::FilterTrailersStatus::Continue; + } + + Http::FilterDataStatus encodeData(Buffer::Instance&, bool) override { + RELEASE_ASSERT(!config_->crash_in_encode_data_, "Crash in encodeData"); + return Http::FilterDataStatus::Continue; + } + + Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap&, bool) override { + RELEASE_ASSERT(!config_->crash_in_encode_headers_, "Crash in encodeHeaders"); + return Http::FilterHeadersStatus::Continue; + } + +private: + const std::shared_ptr config_; +}; + +class CrashFilterFactory : public Extensions::HttpFilters::Common::FactoryBase< + test::integration::filters::CrashFilterConfig> { +public: + CrashFilterFactory() : FactoryBase("crash-filter") {} + +private: + Http::FilterFactoryCb createFilterFactoryFromProtoTyped( + const test::integration::filters::CrashFilterConfig& proto_config, const std::string&, + Server::Configuration::FactoryContext&) override { + auto filter_config = std::make_shared( + proto_config.crash_in_encode_headers(), proto_config.crash_in_encode_data(), + proto_config.crash_in_decode_headers(), proto_config.crash_in_decode_data(), + proto_config.crash_in_decode_trailers()); + return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void { + 
callbacks.addStreamFilter(std::make_shared(filter_config)); + }; + } +}; + +REGISTER_FACTORY(CrashFilterFactory, Server::Configuration::NamedHttpFilterConfigFactory); +} // namespace Envoy diff --git a/test/integration/filters/crash_filter.proto b/test/integration/filters/crash_filter.proto new file mode 100644 index 0000000000000..98c90ef006097 --- /dev/null +++ b/test/integration/filters/crash_filter.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; + +package test.integration.filters; + +import "validate/validate.proto"; + +message CrashFilterConfig { + bool crash_in_encode_headers = 1; + bool crash_in_encode_data = 2; + + bool crash_in_decode_headers = 3; + bool crash_in_decode_data = 4; + bool crash_in_decode_trailers = 5; +} diff --git a/test/integration/hds_integration_test.cc b/test/integration/hds_integration_test.cc index b7938878f0352..d6fd680867ef2 100644 --- a/test/integration/hds_integration_test.cc +++ b/test/integration/hds_integration_test.cc @@ -27,8 +27,7 @@ namespace Envoy { namespace { // TODO(jmarantz): switch this to simulated-time after debugging flakes. -class HdsIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, - public HttpIntegrationTest { +class HdsIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, public HttpIntegrationTest { public: HdsIntegrationTest() : HttpIntegrationTest(Http::CodecType::HTTP1, ipVersion()) {} @@ -38,17 +37,13 @@ class HdsIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, HttpIntegrationTest::createUpstreams(); } void initialize() override { - if (apiVersion() != envoy::config::core::v3::ApiVersion::V3) { - config_helper_.enableDeprecatedV2Api(); - } setUpstreamCount(upstream_endpoints_); - config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { // Setup hds and corresponding gRPC cluster. 
auto* hds_config = bootstrap.mutable_hds_config(); hds_config->set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC); hds_config->set_transport_api_version(envoy::config::core::v3::ApiVersion::V3); hds_config->add_grpc_services()->mutable_envoy_grpc()->set_cluster_name("hds_cluster"); - hds_config->set_transport_api_version(apiVersion()); auto* hds_cluster = bootstrap.mutable_static_resources()->add_clusters(); hds_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]); hds_cluster->mutable_circuit_breakers()->Clear(); @@ -211,9 +206,6 @@ class HdsIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, health_check.mutable_health_checks(0)->mutable_unhealthy_threshold()->set_value(2); health_check.mutable_health_checks(0)->mutable_healthy_threshold()->set_value(2); health_check.mutable_health_checks(0)->mutable_grpc_health_check(); - health_check.mutable_health_checks(0) - ->mutable_http_health_check() - ->set_hidden_envoy_deprecated_use_http2(false); health_check.mutable_health_checks(0)->mutable_http_health_check()->set_path("/healthcheck"); return health_check; @@ -278,9 +270,7 @@ class HdsIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, healthy, host_upstream_->localAddress())) { ASSERT_TRUE(hds_stream_->waitForGrpcMessage(*dispatcher_, response_)); EXPECT_EQ("POST", hds_stream_->headers().getMethodValue()); - EXPECT_EQ(TestUtility::getVersionedMethodPath("envoy.service.{1}.{0}.HealthDiscoveryService", - "StreamHealthCheck", apiVersion(), - serviceNamespace()), + EXPECT_EQ("/envoy.service.health.v3.HealthDiscoveryService/StreamHealthCheck", hds_stream_->headers().getPathValue()); EXPECT_EQ("application/grpc", hds_stream_->headers().getContentTypeValue()); } @@ -333,9 +323,7 @@ class HdsIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, } EXPECT_EQ("POST", hds_stream_->headers().getMethodValue()); - 
EXPECT_EQ(TestUtility::getVersionedMethodPath("envoy.service.{1}.{0}.HealthDiscoveryService", - "StreamHealthCheck", apiVersion(), - serviceNamespace()), + EXPECT_EQ("/envoy.service.health.v3.HealthDiscoveryService/StreamHealthCheck", hds_stream_->headers().getPathValue()); EXPECT_EQ("application/grpc", hds_stream_->headers().getContentTypeValue()); } @@ -343,19 +331,6 @@ class HdsIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, return true; } - const std::string serviceNamespace() const { - switch (apiVersion()) { - case envoy::config::core::v3::ApiVersion::AUTO: - FALLTHRU; - case envoy::config::core::v3::ApiVersion::V2: - return "discovery"; - case envoy::config::core::v3::ApiVersion::V3: - return "health"; - default: - NOT_REACHED_GCOVR_EXCL_LINE; - } - } - static constexpr uint32_t upstream_endpoints_ = 0; FakeHttpConnectionPtr hds_fake_connection_; @@ -379,14 +354,12 @@ class HdsIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, envoy::service::health::v3::HealthCheckSpecifier server_health_check_specifier_; }; -INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, HdsIntegrationTest, - VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS, - Grpc::VersionedGrpcClientIntegrationParamTest::protocolTestParamsToString); +INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, HdsIntegrationTest, GRPC_CLIENT_INTEGRATION_PARAMS, + Grpc::GrpcClientIntegrationParamTest::protocolTestParamsToString); // Tests Envoy HTTP health checking a single healthy endpoint and reporting that it is // indeed healthy to the server. TEST_P(HdsIntegrationTest, SingleEndpointHealthyHttp) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; initialize(); // Server <--> Envoy @@ -422,7 +395,6 @@ TEST_P(HdsIntegrationTest, SingleEndpointHealthyHttp) { // Tests Envoy HTTP health checking a single endpoint that times out and reporting // that it is unhealthy to the server. 
TEST_P(HdsIntegrationTest, SingleEndpointTimeoutHttp) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; initialize(); server_health_check_specifier_ = makeHttpHealthCheckSpecifier(envoy::type::v3::CodecClientType::HTTP1, false); @@ -464,7 +436,6 @@ TEST_P(HdsIntegrationTest, SingleEndpointTimeoutHttp) { // Tests Envoy HTTP health checking a single unhealthy endpoint and reporting that it is // indeed unhealthy to the server. TEST_P(HdsIntegrationTest, SingleEndpointUnhealthyHttp) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; initialize(); server_health_check_specifier_ = makeHttpHealthCheckSpecifier(envoy::type::v3::CodecClientType::HTTP1, false); @@ -498,7 +469,6 @@ TEST_P(HdsIntegrationTest, SingleEndpointUnhealthyHttp) { // Tests Envoy TCP health checking an endpoint that doesn't respond and reporting that it is // unhealthy to the server. TEST_P(HdsIntegrationTest, SingleEndpointTimeoutTcp) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; initialize(); // Server <--> Envoy @@ -539,7 +509,6 @@ TEST_P(HdsIntegrationTest, SingleEndpointTimeoutTcp) { // Tests Envoy TCP health checking a single healthy endpoint and reporting that it is // indeed healthy to the server. TEST_P(HdsIntegrationTest, SingleEndpointHealthyTcp) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; initialize(); // Server <--> Envoy @@ -570,7 +539,6 @@ TEST_P(HdsIntegrationTest, SingleEndpointHealthyTcp) { // Tests Envoy TCP health checking a single unhealthy endpoint and reporting that it is // indeed unhealthy to the server. TEST_P(HdsIntegrationTest, SingleEndpointUnhealthyTcp) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; initialize(); // Server <--> Envoy @@ -605,7 +573,6 @@ TEST_P(HdsIntegrationTest, SingleEndpointUnhealthyTcp) { // Tests that Envoy can HTTP health check two hosts that are in the same cluster, and // the same locality and report back the correct health statuses. 
TEST_P(HdsIntegrationTest, TwoEndpointsSameLocality) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; initialize(); server_health_check_specifier_ = @@ -665,7 +632,6 @@ TEST_P(HdsIntegrationTest, TwoEndpointsSameLocality) { // Tests that Envoy can HTTP health check two hosts that are in the same cluster, and // different localities and report back the correct health statuses. TEST_P(HdsIntegrationTest, TwoEndpointsDifferentLocality) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; initialize(); server_health_check_specifier_ = makeHttpHealthCheckSpecifier(envoy::type::v3::CodecClientType::HTTP1, false); @@ -734,7 +700,6 @@ TEST_P(HdsIntegrationTest, TwoEndpointsDifferentLocality) { // Tests that Envoy can HTTP health check two hosts that are in different clusters, and // report back the correct health statuses. TEST_P(HdsIntegrationTest, TwoEndpointsDifferentClusters) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; initialize(); server_health_check_specifier_ = makeHttpHealthCheckSpecifier(envoy::type::v3::CodecClientType::HTTP1, false); @@ -803,7 +768,6 @@ TEST_P(HdsIntegrationTest, TwoEndpointsDifferentClusters) { // Tests Envoy HTTP health checking a single endpoint, receiving an update // message from the management server and health checking a new endpoint TEST_P(HdsIntegrationTest, TestUpdateMessage) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; initialize(); // Server <--> Envoy @@ -851,9 +815,6 @@ TEST_P(HdsIntegrationTest, TestUpdateMessage) { health_check->mutable_health_checks(0)->mutable_unhealthy_threshold()->set_value(2); health_check->mutable_health_checks(0)->mutable_healthy_threshold()->set_value(2); health_check->mutable_health_checks(0)->mutable_grpc_health_check(); - health_check->mutable_health_checks(0) - ->mutable_http_health_check() - ->set_hidden_envoy_deprecated_use_http2(false); health_check->mutable_health_checks(0)->mutable_http_health_check()->set_path("/healthcheck"); // Server asks for health checking with the new message @@ -885,7 +846,6 @@ TEST_P(HdsIntegrationTest, 
TestUpdateMessage) { // Tests Envoy HTTP health checking a single endpoint, receiving an update // message from the management server and reporting in a new interval TEST_P(HdsIntegrationTest, TestUpdateChangesTimer) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; initialize(); // Server <--> Envoy @@ -926,7 +886,6 @@ TEST_P(HdsIntegrationTest, TestUpdateChangesTimer) { // Tests Envoy HTTP health checking a single endpoint when interval hasn't been defined TEST_P(HdsIntegrationTest, TestDefaultTimer) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; initialize(); // Server <--> Envoy @@ -954,7 +913,6 @@ TEST_P(HdsIntegrationTest, TestDefaultTimer) { // Health checks a single endpoint over TLS with HTTP/2 TEST_P(HdsIntegrationTest, SingleEndpointHealthyTlsHttp2) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; // Change member variable to specify host streams to have tls transport socket. tls_hosts_ = true; @@ -995,7 +953,6 @@ TEST_P(HdsIntegrationTest, SingleEndpointHealthyTlsHttp2) { // Health checks a single endpoint over TLS with HTTP/1 TEST_P(HdsIntegrationTest, SingleEndpointHealthyTlsHttp1) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; // Change member variable to specify host streams to have tls transport socket. tls_hosts_ = true; @@ -1033,7 +990,6 @@ TEST_P(HdsIntegrationTest, SingleEndpointHealthyTlsHttp1) { // Attempts to health check a TLS endpoint over plaintext, which should fail. TEST_P(HdsIntegrationTest, SingleEndpointUnhealthyTlsMissingSocketMatch) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; // Make the endpoints expect communication over TLS. 
tls_hosts_ = true; @@ -1074,7 +1030,6 @@ TEST_P(HdsIntegrationTest, SingleEndpointUnhealthyTlsMissingSocketMatch) { } TEST_P(HdsIntegrationTest, UpdateEndpoints) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; initialize(); server_health_check_specifier_ = makeHttpHealthCheckSpecifier(envoy::type::v3::CodecClientType::HTTP1, false); @@ -1195,7 +1150,6 @@ TEST_P(HdsIntegrationTest, UpdateEndpoints) { // Tests Envoy HTTP health checking a custom healthy endpoint and reporting it is healthy TEST_P(HdsIntegrationTest, SingleEndpointHealthyHttpCustomPort) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; initialize(); // Server <--> Envoy @@ -1239,7 +1193,6 @@ TEST_P(HdsIntegrationTest, SingleEndpointHealthyHttpCustomPort) { // Tests Envoy HTTP health checking a single unhealthy endpoint and reporting that it is // indeed unhealthy to the server. TEST_P(HdsIntegrationTest, SingleEndpointUnhealthyHttpCustomPort) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; initialize(); server_health_check_specifier_ = @@ -1281,7 +1234,6 @@ TEST_P(HdsIntegrationTest, SingleEndpointUnhealthyHttpCustomPort) { // Tests Envoy keeps sending EndpointHealthResponses after the HDS server reconnection TEST_P(HdsIntegrationTest, SingleEndpointHealthyHttpHdsReconnect) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; initialize(); // Server <--> Envoy diff --git a/test/integration/header_integration_test.cc b/test/integration/header_integration_test.cc index 0abf8213f4d4e..2ceef32088453 100644 --- a/test/integration/header_integration_test.cc +++ b/test/integration/header_integration_test.cc @@ -11,6 +11,7 @@ #include "source/common/http/exception.h" #include "source/common/protobuf/protobuf.h" +#include "test/config/v2_link_hacks.h" #include "test/integration/http_integration.h" #include "test/test_common/network_utility.h" #include "test/test_common/resources.h" @@ -42,11 +43,7 @@ const std::string http_connection_mgr_config = R"EOF( - name: envoy.filters.http.router codec_type: HTTP1 use_remote_address: false 
-original_ip_detection_extensions: -- name: envoy.http.original_ip_detection.xff - typed_config: - "@type": type.googleapis.com/envoy.extensions.http.original_ip_detection.xff.v3.XffConfig - xff_num_trusted_hops: 1 +xff_num_trusted_hops: 1 stat_prefix: header_test route_config: virtual_hosts: diff --git a/test/integration/health_check_integration_test.cc b/test/integration/health_check_integration_test.cc index c0ea0fea9a742..dcd6efdee9156 100644 --- a/test/integration/health_check_integration_test.cc +++ b/test/integration/health_check_integration_test.cc @@ -5,6 +5,7 @@ #include "test/common/grpc/grpc_client_integration.h" #include "test/common/http/http2/http2_frame.h" #include "test/common/upstream/utility.h" +#include "test/config/v2_link_hacks.h" #include "test/integration/http_integration.h" #include "gtest/gtest.h" @@ -538,6 +539,7 @@ class GrpcHealthCheckIntegrationTest : public Event::TestUsingSimulatedTime, void sendGrpcResponse(uint32_t cluster_idx, const Http::TestResponseHeaderMapImpl& response_headers, const grpc::health::v1::HealthCheckResponse& health_check_response) { + clusters_[cluster_idx].host_stream_->startGrpcStream(false); clusters_[cluster_idx].host_stream_->encodeHeaders(response_headers, false); clusters_[cluster_idx].host_stream_->sendGrpcMessage(health_check_response); clusters_[cluster_idx].host_stream_->finishGrpcStream(Grpc::Status::WellKnownGrpcStatus::Ok); diff --git a/test/integration/http2_flood_integration_test.cc b/test/integration/http2_flood_integration_test.cc index 2e20e34a56484..64db06468019e 100644 --- a/test/integration/http2_flood_integration_test.cc +++ b/test/integration/http2_flood_integration_test.cc @@ -1499,4 +1499,64 @@ TEST_P(Http2FloodMitigationTest, UpstreamFloodDetectionIsOnByDefault) { "cluster.cluster_0.http2.outbound_control_flood"); } +class Http2ManyStreamsTest + : public testing::TestWithParam>, + public Http2RawFrameIntegrationTest { +protected: + Http2ManyStreamsTest() : 
Http2RawFrameIntegrationTest(std::get<0>(GetParam())) { + config_helper_.addRuntimeOverride("envoy.reloadable_features.improved_stream_limit_handling", + useImprovedStreamLimitHandling() ? "true" : "false"); + } + + bool useImprovedStreamLimitHandling() const { return std::get<1>(GetParam()); } +}; + +INSTANTIATE_TEST_SUITE_P( + IpVersionsAndRuntimeFeature, Http2ManyStreamsTest, + testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), testing::Bool())); + +TEST_P(Http2ManyStreamsTest, UpstreamRstStreamStormOnDownstreamCloseRegressionTest) { + const uint32_t num_requests = 80; + + envoy::config::core::v3::Http2ProtocolOptions config; + config.mutable_max_concurrent_streams()->set_value(num_requests / 2); + mergeOptions(config); + autonomous_upstream_ = true; + autonomous_allow_incomplete_streams_ = true; + config_helper_.setUpstreamOutboundFramesLimits(AllFrameFloodLimit, ControlFrameFloodLimit); + beginSession(); + + // Send a normal request and wait for the response as a way to prime the upstream connection and + // ensure that SETTINGS are exchanged. Skipping this step may result in the upstream seeing too + // many active streams at the same time and terminating the connection to the proxy since stream + // limits were not obeyed. + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + ASSERT_TRUE(response->waitForEndStream()); + EXPECT_EQ("200", response->headers().getStatusValue()); + + // Open a large number of streams and wait until they are active at the proxy. + for (uint32_t i = 0; i < num_requests; ++i) { + auto request = Http2Frame::makeRequest(Http2Frame::makeClientStreamId(i), "host", "/", + {Http2Frame::Header("no_end_stream", "1")}); + sendFrame(request); + } + test_server_->waitForGaugeEq("cluster.cluster_0.upstream_rq_active", num_requests, + TestUtility::DefaultTimeout); + + // Disconnect downstream connection. 
Envoy should send RST_STREAM to cancel active upstream + // requests. + tcp_client_->close(); + + // Wait until the disconnect is detected and all upstream connections have been closed. + test_server_->waitForGaugeEq("cluster.cluster_0.upstream_rq_active", 0, + TestUtility::DefaultTimeout); + + // The disconnect shouldn't trigger an outbound control frame flood. + EXPECT_EQ(0, test_server_->counter("cluster.cluster_0.http2.outbound_control_flood")->value()); + // Verify that the upstream connections are still active. + EXPECT_EQ(useImprovedStreamLimitHandling() ? 2 : 1, + test_server_->gauge("cluster.cluster_0.upstream_cx_active")->value()); +} + } // namespace Envoy diff --git a/test/integration/http_conn_pool_integration_test.cc b/test/integration/http_conn_pool_integration_test.cc index a1dd4e9fb4007..8f94fa0533005 100644 --- a/test/integration/http_conn_pool_integration_test.cc +++ b/test/integration/http_conn_pool_integration_test.cc @@ -16,10 +16,11 @@ class HttpConnPoolIntegrationTest : public HttpProtocolIntegrationTest { envoy::config::cluster::v3::CircuitBreakers circuit_breakers; auto* threshold = circuit_breakers.mutable_thresholds()->Add(); threshold->mutable_max_connection_pools()->set_value(1); - bootstrap.mutable_static_resources() - ->mutable_clusters(0) - ->mutable_circuit_breakers() - ->MergeFrom(circuit_breakers); + auto* static_resources = bootstrap.mutable_static_resources(); + for (int i = 0; i < static_resources->clusters_size(); ++i) { + static_resources->mutable_clusters(i)->mutable_circuit_breakers()->MergeFrom( + circuit_breakers); + } }); HttpProtocolIntegrationTest::initialize(); } @@ -87,5 +88,96 @@ TEST_P(HttpConnPoolIntegrationTest, PoolCleanupAfterRemoteClose) { test_server_->waitForGaugeEq("cluster.cluster_0.circuit_breakers.default.cx_pool_open", 0); } +// Verify that the drainConnections() cluster manager API works correctly. 
+TEST_P(HttpConnPoolIntegrationTest, PoolDrainAfterDrainApiSpecificCluster) { + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = codec_client_->makeRequestWithBody(default_request_headers_, 1024); + waitForNextUpstreamRequest(); + + // Validate that the circuit breaker config is setup as we expect. + test_server_->waitForGaugeEq("cluster.cluster_0.circuit_breakers.default.cx_pool_open", 1); + + upstream_request_->encodeHeaders(default_response_headers_, false); + upstream_request_->encodeData(512, true); + ASSERT_TRUE(response->waitForEndStream()); + + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_TRUE(response->complete()); + + // Drain connection pools via API. Need to post this to the server thread. + test_server_->server().dispatcher().post( + [this] { test_server_->server().clusterManager().drainConnections("cluster_0"); }); + + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); + + // Validate that the pool is deleted when it becomes idle. + test_server_->waitForGaugeEq("cluster.cluster_0.circuit_breakers.default.cx_pool_open", 0); +} + +// Verify that the drainConnections() cluster manager API works correctly. +TEST_P(HttpConnPoolIntegrationTest, PoolDrainAfterDrainApiAllClusters) { + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + bootstrap.mutable_static_resources()->mutable_clusters()->Add()->MergeFrom( + *bootstrap.mutable_static_resources()->mutable_clusters(0)); + bootstrap.mutable_static_resources()->mutable_clusters(1)->set_name("cluster_1"); + }); + + setUpstreamCount(2); + + auto host = config_helper_.createVirtualHost("cluster_1.com", "/", "cluster_1"); + config_helper_.addVirtualHost(host); + + config_helper_.setDefaultHostAndRoute("cluster_0.com", "/"); + + initialize(); + + // Request Flow to cluster_0. 
+ codec_client_ = makeHttpConnection(lookupPort("http")); + default_request_headers_.setHost("cluster_0.com"); + auto response = codec_client_->makeRequestWithBody(default_request_headers_, 1024); + waitForNextUpstreamRequest(); + + // Validate that the circuit breaker config is setup as we expect. + test_server_->waitForGaugeEq("cluster.cluster_0.circuit_breakers.default.cx_pool_open", 1); + + upstream_request_->encodeHeaders(default_response_headers_, false); + upstream_request_->encodeData(512, true); + ASSERT_TRUE(response->waitForEndStream()); + + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_TRUE(response->complete()); + + auto first_connection = std::move(fake_upstream_connection_); + codec_client_->close(); + + // Request Flow to cluster_1. + codec_client_ = makeHttpConnection(lookupPort("http")); + default_request_headers_.setHost("cluster_1.com"); + response = codec_client_->makeRequestWithBody(default_request_headers_, 1024); + waitForNextUpstreamRequest(1); + + // Validate that the circuit breaker config is setup as we expect. + test_server_->waitForGaugeEq("cluster.cluster_1.circuit_breakers.default.cx_pool_open", 1); + + upstream_request_->encodeHeaders(default_response_headers_, false); + upstream_request_->encodeData(512, true); + ASSERT_TRUE(response->waitForEndStream()); + + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_TRUE(response->complete()); + + // Drain connection pools via API. Need to post this to the server thread. 
+ test_server_->server().dispatcher().post( + [this] { test_server_->server().clusterManager().drainConnections(); }); + + ASSERT_TRUE(first_connection->waitForDisconnect()); + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); + + test_server_->waitForGaugeEq("cluster.cluster_0.circuit_breakers.default.cx_pool_open", 0); + test_server_->waitForGaugeEq("cluster.cluster_1.circuit_breakers.default.cx_pool_open", 0); +} + } // namespace } // namespace Envoy diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc index 0430602910755..3cb411a0c730a 100644 --- a/test/integration/http_integration.cc +++ b/test/integration/http_integration.cc @@ -1404,93 +1404,6 @@ void HttpIntegrationTest::testAdminDrain(Http::CodecType admin_request_type) { } } -void HttpIntegrationTest::testMaxStreamDuration() { - config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { - ConfigHelper::HttpProtocolOptions protocol_options; - auto* http_protocol_options = protocol_options.mutable_common_http_protocol_options(); - http_protocol_options->mutable_max_stream_duration()->MergeFrom( - ProtobufUtil::TimeUtil::MillisecondsToDuration(200)); - ConfigHelper::setProtocolOptions(*bootstrap.mutable_static_resources()->mutable_clusters(0), - protocol_options); - }); - - initialize(); - codec_client_ = makeHttpConnection(lookupPort("http")); - - auto encoder_decoder = codec_client_->startRequest(default_request_headers_); - request_encoder_ = &encoder_decoder.first; - auto response = std::move(encoder_decoder.second); - - ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); - ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); - - test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_max_duration_reached", 1); - - if (downstream_protocol_ == Http::CodecType::HTTP1) { - ASSERT_TRUE(codec_client_->waitForDisconnect()); - } else { - 
ASSERT_TRUE(response->waitForEndStream()); - codec_client_->close(); - } -} - -void HttpIntegrationTest::testMaxStreamDurationWithRetry(bool invoke_retry_upstream_disconnect) { - config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { - ConfigHelper::HttpProtocolOptions protocol_options; - auto* http_protocol_options = protocol_options.mutable_common_http_protocol_options(); - http_protocol_options->mutable_max_stream_duration()->MergeFrom( - ProtobufUtil::TimeUtil::MillisecondsToDuration(1000)); - ConfigHelper::setProtocolOptions(*bootstrap.mutable_static_resources()->mutable_clusters(0), - protocol_options); - }); - - Http::TestRequestHeaderMapImpl retriable_header = Http::TestRequestHeaderMapImpl{ - {":method", "POST"}, {":path", "/test/long/url"}, {":scheme", "http"}, - {":authority", "host"}, {"x-forwarded-for", "10.0.0.1"}, {"x-envoy-retry-on", "5xx"}}; - initialize(); - codec_client_ = makeHttpConnection(lookupPort("http")); - - auto encoder_decoder = codec_client_->startRequest(retriable_header); - request_encoder_ = &encoder_decoder.first; - auto response = std::move(encoder_decoder.second); - - ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); - ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); - ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); - - if (fake_upstreams_[0]->httpType() == Http::CodecType::HTTP1) { - ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); - ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); - } else { - ASSERT_TRUE(upstream_request_->waitForReset()); - } - - test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_max_duration_reached", 1); - - ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); - - if (invoke_retry_upstream_disconnect) { - 
test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_max_duration_reached", 2); - if (downstream_protocol_ == Http::CodecType::HTTP1) { - ASSERT_TRUE(codec_client_->waitForDisconnect()); - } else { - ASSERT_TRUE(response->waitForEndStream()); - codec_client_->close(); - } - - EXPECT_EQ("408", response->headers().getStatusValue()); - } else { - Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; - upstream_request_->encodeHeaders(response_headers, true); - - response->waitForHeaders(); - codec_client_->close(); - - EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().getStatusValue()); - } -} - std::string HttpIntegrationTest::downstreamProtocolStatsRoot() const { switch (downstreamProtocol()) { case Http::CodecClient::Type::HTTP1: diff --git a/test/integration/http_integration.h b/test/integration/http_integration.h index 34a6ed6c82ee7..a872292747a60 100644 --- a/test/integration/http_integration.h +++ b/test/integration/http_integration.h @@ -237,11 +237,8 @@ class HttpIntegrationTest : public BaseIntegrationTest { void testTrailers(uint64_t request_size, uint64_t response_size, bool request_trailers_present, bool response_trailers_present); // Test /drain_listener from admin portal. - void testAdminDrain(Http::CodecType admin_request_type); - // Test max stream duration. - void testMaxStreamDuration(); - void testMaxStreamDurationWithRetry(bool invoke_retry_upstream_disconnect); - Http::CodecType downstreamProtocol() const { return downstream_protocol_; } + void testAdminDrain(Http::CodecClient::Type admin_request_type); + Http::CodecClient::Type downstreamProtocol() const { return downstream_protocol_; } std::string downstreamProtocolStatsRoot() const; // Return the upstream protocol part of the stats root. 
std::string upstreamProtocolStatsRoot() const; diff --git a/test/integration/http_subset_lb_integration_test.cc b/test/integration/http_subset_lb_integration_test.cc index 6d9b1d96e363d..d32ec0b1a94f7 100644 --- a/test/integration/http_subset_lb_integration_test.cc +++ b/test/integration/http_subset_lb_integration_test.cc @@ -15,7 +15,8 @@ class HttpSubsetLbIntegrationTest : public testing::TestWithParam, public HttpIntegrationTest { public: - // Returns all load balancer types except ORIGINAL_DST_LB and CLUSTER_PROVIDED. + // Returns all load balancer types except ORIGINAL_DST_LB, CLUSTER_PROVIDED + // and LOAD_BALANCING_POLICY_CONFIG. static std::vector getSubsetLbTestParams() { int first = static_cast(envoy::config::cluster::v3::Cluster::LbPolicy_MIN); int last = static_cast(envoy::config::cluster::v3::Cluster::LbPolicy_MAX); diff --git a/test/integration/integration_admin_test.cc b/test/integration/integration_admin_test.cc index b694e98c55ec8..578017e301112 100644 --- a/test/integration/integration_admin_test.cc +++ b/test/integration/integration_admin_test.cc @@ -573,7 +573,6 @@ TEST_P(StatsMatcherIntegrationTest, ExcludePrefixServerDot) { } TEST_P(StatsMatcherIntegrationTest, DEPRECATED_FEATURE_TEST(DISABLED_ExcludeRequests)) { - v2_bootstrap_ = true; stats_matcher_.mutable_exclusion_list()->add_patterns()->MergeFrom( TestUtility::createRegexMatcher(".*requests.*")); initialize(); @@ -589,7 +588,6 @@ TEST_P(StatsMatcherIntegrationTest, DEPRECATED_FEATURE_TEST(ExcludeExact)) { } TEST_P(StatsMatcherIntegrationTest, DEPRECATED_FEATURE_TEST(DISABLED_ExcludeMultipleExact)) { - v2_bootstrap_ = true; stats_matcher_.mutable_exclusion_list()->add_patterns()->set_exact("server.concurrency"); stats_matcher_.mutable_exclusion_list()->add_patterns()->MergeFrom( TestUtility::createRegexMatcher(".*live")); diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index 48f68e41e49ac..450e787dd71e1 100644 --- 
a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -3,8 +3,8 @@ #include #include "envoy/config/bootstrap/v3/bootstrap.pb.h" -#include "envoy/config/filter/http/grpc_http1_bridge/v2/config.pb.h" #include "envoy/config/route/v3/route_components.pb.h" +#include "envoy/extensions/filters/http/grpc_http1_bridge/v3/config.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include "source/common/http/header_map_impl.h" @@ -68,7 +68,7 @@ TEST_P(IntegrationTest, BadPrebindSocketOptionWithReusePort) { config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); listener->mutable_address()->mutable_socket_address()->set_port_value( - addr_socket.second->addressProvider().localAddress()->ip()->port()); + addr_socket.second->connectionInfoProvider().localAddress()->ip()->port()); auto socket_option = listener->add_socket_options(); socket_option->set_state(envoy::config::core::v3::SocketOption::STATE_PREBIND); socket_option->set_level(10000); // Invalid level. @@ -89,7 +89,7 @@ TEST_P(IntegrationTest, BadPostbindSocketOptionWithReusePort) { config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); listener->mutable_address()->mutable_socket_address()->set_port_value( - addr_socket.second->addressProvider().localAddress()->ip()->port()); + addr_socket.second->connectionInfoProvider().localAddress()->ip()->port()); auto socket_option = listener->add_socket_options(); socket_option->set_state(envoy::config::core::v3::SocketOption::STATE_BOUND); socket_option->set_level(10000); // Invalid level. 
@@ -157,6 +157,53 @@ TEST_P(IntegrationTest, PerWorkerStatsAndBalancing) { check_listener_stats(0, 1); } +// Make sure all workers pick up connections +TEST_P(IntegrationTest, AllWorkersAreHandlingLoad) { + concurrency_ = 2; + initialize(); + + std::string worker0_stat_name, worker1_stat_name; + if (GetParam() == Network::Address::IpVersion::v4) { + worker0_stat_name = "listener.127.0.0.1_0.worker_0.downstream_cx_total"; + worker1_stat_name = "listener.127.0.0.1_0.worker_1.downstream_cx_total"; + } else { + worker0_stat_name = "listener.[__1]_0.worker_0.downstream_cx_total"; + worker1_stat_name = "listener.[__1]_0.worker_1.downstream_cx_total"; + } + + test_server_->waitForCounterEq(worker0_stat_name, 0); + test_server_->waitForCounterEq(worker1_stat_name, 0); + + // We set the counters for the two workers to see how many connections each handles. + uint64_t w0_ctr = 0; + uint64_t w1_ctr = 0; + constexpr int loops = 5; + for (int i = 0; i < loops; i++) { + constexpr int requests_per_loop = 4; + std::array connections; + for (int j = 0; j < requests_per_loop; j++) { + connections[j] = makeHttpConnection(lookupPort("http")); + } + + auto worker0_ctr = test_server_->counter(worker0_stat_name); + auto worker1_ctr = test_server_->counter(worker1_stat_name); + auto target = w0_ctr + w1_ctr + requests_per_loop; + while (test_server_->counter(worker0_stat_name)->value() + + test_server_->counter(worker1_stat_name)->value() < + target) { + timeSystem().advanceTimeWait(std::chrono::milliseconds(10)); + } + w0_ctr = test_server_->counter(worker0_stat_name)->value(); + w1_ctr = test_server_->counter(worker1_stat_name)->value(); + for (int j = 0; j < requests_per_loop; j++) { + connections[j]->close(); + } + } + + EXPECT_TRUE(w0_ctr > 1); + EXPECT_TRUE(w1_ctr > 1); +} + TEST_P(IntegrationTest, RouterDirectResponseWithBody) { const std::string body = "Response body"; const std::string file_path = TestEnvironment::writeStringToFileForTest("test_envoy", body); @@ -395,7 +442,74 @@ 
name: matcher typed_config: "@type": type.googleapis.com/test.integration.filters.SetResponseCodeFilterConfig code: 403 - matcher: + xds_matcher: + matcher_tree: + input: + name: request-headers + typed_config: + "@type": type.googleapis.com/envoy.type.matcher.v3.HttpRequestHeaderMatchInput + header_name: match-header + exact_match_map: + map: + match: + action: + name: skip + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.common.matcher.action.v3.SkipFilter +)EOF"); + + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + { + auto response = codec_client_->makeRequestWithBody(default_request_headers_, 1024); + ASSERT_TRUE(response->waitForEndStream()); + EXPECT_THAT(response->headers(), HttpStatusIs("403")); + } + + { + codec_client_ = makeHttpConnection(lookupPort("http")); + Http::TestRequestHeaderMapImpl request_headers{ + {":method", "POST"}, {":path", "/test/long/url"}, {":scheme", "http"}, + {":authority", "host"}, {"match-header", "match"}, {"content-type", "application/grpc"}}; + auto response = codec_client_->makeRequestWithBody(request_headers, 1024); + waitForNextUpstreamRequest(); + upstream_request_->encodeHeaders(default_response_headers_, true); + + ASSERT_TRUE(response->waitForEndStream()); + EXPECT_THAT(response->headers(), HttpStatusIs("200")); + } + + auto second_codec = makeHttpConnection(lookupPort("http")); + Http::TestRequestHeaderMapImpl request_headers{ + {":method", "POST"}, {":path", "/test/long/url"}, {":scheme", "http"}, + {":authority", "host"}, {"match-header", "not-match"}, {"content-type", "application/grpc"}}; + auto response = second_codec->makeRequestWithBody(request_headers, 1024); + + ASSERT_TRUE(response->waitForEndStream()); + EXPECT_THAT(response->headers(), HttpStatusIs("200")); + + codec_client_->close(); + second_codec->close(); +} + +// Verifies that we can construct a match tree with a filter using the new matcher tree proto, and +// that we are able to skip filter 
invocation through the match tree. +TEST_P(IntegrationTest, MatchingHttpFilterConstructionNewProto) { + concurrency_ = 2; + config_helper_.addRuntimeOverride("envoy.reloadable_features.experimental_matching_api", "true"); + + config_helper_.addFilter(R"EOF( +name: matcher +typed_config: + "@type": type.googleapis.com/envoy.extensions.common.matching.v3.ExtensionWithMatcher + extension_config: + name: set-response-code + typed_config: + "@type": type.googleapis.com/test.integration.filters.SetResponseCodeFilterConfig + code: 403 + xds_matcher: matcher_tree: input: name: request-headers @@ -508,16 +622,13 @@ TEST_P(IntegrationTest, UpstreamDisconnectWithTwoRequests) { test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_200", 2); } -const ::envoy::config::filter::http::grpc_http1_bridge::v2::Config _grpc_http1_bridge_dummy; - // Test hitting the bridge filter with too many response bytes to buffer. Given // the headers are not proxied, the connection manager will send a local error reply. 
TEST_P(IntegrationTest, HittingGrpcFilterLimitBufferingHeaders) { config_helper_.addFilter( "{ name: grpc_http1_bridge, typed_config: { \"@type\": " - "type.googleapis.com/envoy.config.filter.http.grpc_http1_bridge.v2.Config } }"); + "type.googleapis.com/envoy.extensions.filters.http.grpc_http1_bridge.v3.Config } }"); config_helper_.setBufferLimits(1024, 1024); - initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -1292,7 +1403,7 @@ TEST_P(IntegrationTest, TestBind) { ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); ASSERT_NE(fake_upstream_connection_, nullptr); std::string address = fake_upstream_connection_->connection() - .addressProvider() + .connectionInfoProvider() .remoteAddress() ->ip() ->addressAsString(); diff --git a/test/integration/listener_lds_integration_test.cc b/test/integration/listener_lds_integration_test.cc index 55a2c00a797f8..47d7859bf5f03 100644 --- a/test/integration/listener_lds_integration_test.cc +++ b/test/integration/listener_lds_integration_test.cc @@ -9,9 +9,9 @@ #include "envoy/service/discovery/v3/discovery.pb.h" #include "source/common/config/api_version.h" -#include "source/common/config/version_converter.h" #include "test/common/grpc/grpc_client_integration.h" +#include "test/config/v2_link_hacks.h" #include "test/integration/http_integration.h" #include "test/test_common/network_utility.h" #include "test/test_common/printers.h" diff --git a/test/integration/load_balancers/BUILD b/test/integration/load_balancers/BUILD new file mode 100644 index 0000000000000..60e6b81a25cd0 --- /dev/null +++ b/test/integration/load_balancers/BUILD @@ -0,0 +1,24 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test_library", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_test_library( + name = "custom_lb_policy", + srcs = [ + "custom_lb_policy.cc", + ], + hdrs = [ + "custom_lb_policy.h", + ], + deps = [ + 
"//envoy/upstream:load_balancer_interface", + "//source/common/upstream:load_balancer_factory_base_lib", + "//test/test_common:registry_lib", + ], +) diff --git a/test/integration/load_balancers/custom_lb_policy.cc b/test/integration/load_balancers/custom_lb_policy.cc new file mode 100644 index 0000000000000..785da9e6895b5 --- /dev/null +++ b/test/integration/load_balancers/custom_lb_policy.cc @@ -0,0 +1,9 @@ +#include "test/integration/load_balancers/custom_lb_policy.h" + +#include "envoy/registry/registry.h" + +namespace Envoy { + +REGISTER_FACTORY(CustomLbFactory, Upstream::TypedLoadBalancerFactory); + +} // namespace Envoy diff --git a/test/integration/load_balancers/custom_lb_policy.h b/test/integration/load_balancers/custom_lb_policy.h new file mode 100644 index 0000000000000..55e5e60cef81f --- /dev/null +++ b/test/integration/load_balancers/custom_lb_policy.h @@ -0,0 +1,60 @@ +#pragma once + +#include "envoy/upstream/load_balancer.h" + +#include "source/common/upstream/load_balancer_factory_base.h" + +#include "test/test_common/registry.h" + +namespace Envoy { + +class ThreadAwareLbImpl : public Upstream::ThreadAwareLoadBalancer { +public: + ThreadAwareLbImpl() : host_(nullptr) {} + ThreadAwareLbImpl(const Upstream::HostSharedPtr& host) : host_(host) {} + + Upstream::LoadBalancerFactorySharedPtr factory() override { + return std::make_shared(host_); + } + void initialize() override {} + +private: + class LbImpl : public Upstream::LoadBalancer { + public: + LbImpl(const Upstream::HostSharedPtr& host) : host_(host) {} + + Upstream::HostConstSharedPtr chooseHost(Upstream::LoadBalancerContext*) override { + return host_; + } + Upstream::HostConstSharedPtr peekAnotherHost(Upstream::LoadBalancerContext*) override { + return nullptr; + } + + const Upstream::HostSharedPtr host_; + }; + + class LbFactory : public Upstream::LoadBalancerFactory { + public: + LbFactory(const Upstream::HostSharedPtr& host) : host_(host) {} + + Upstream::LoadBalancerPtr create() override 
{ return std::make_unique(host_); } + + const Upstream::HostSharedPtr host_; + }; + + const Upstream::HostSharedPtr host_; +}; + +class CustomLbFactory : public Upstream::TypedLoadBalancerFactoryBase { +public: + CustomLbFactory() : TypedLoadBalancerFactoryBase("envoy.load_balancers.custom_lb") {} + + Upstream::ThreadAwareLoadBalancerPtr + create(const Upstream::PrioritySet&, Upstream::ClusterStats&, Stats::Scope&, Runtime::Loader&, + Random::RandomGenerator&, + const ::envoy::config::cluster::v3::LoadBalancingPolicy_Policy&) override { + return std::make_unique(); + } +}; + +} // namespace Envoy diff --git a/test/integration/load_stats_integration_test.cc b/test/integration/load_stats_integration_test.cc index 630db22619042..213622cc8cf54 100644 --- a/test/integration/load_stats_integration_test.cc +++ b/test/integration/load_stats_integration_test.cc @@ -17,7 +17,7 @@ namespace Envoy { namespace { -class LoadStatsIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, +class LoadStatsIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, public HttpIntegrationTest { public: LoadStatsIntegrationTest() : HttpIntegrationTest(Http::CodecType::HTTP1, ipVersion()) { @@ -105,16 +105,13 @@ class LoadStatsIntegrationTest : public Grpc::VersionedGrpcClientIntegrationPara } void initialize() override { - if (apiVersion() != envoy::config::core::v3::ApiVersion::V3) { - config_helper_.enableDeprecatedV2Api(); - } setUpstreamCount(upstream_endpoints_); config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { // Setup load reporting and corresponding gRPC cluster. 
auto* loadstats_config = bootstrap.mutable_cluster_manager()->mutable_load_stats_config(); loadstats_config->set_api_type(envoy::config::core::v3::ApiConfigSource::GRPC); loadstats_config->add_grpc_services()->mutable_envoy_grpc()->set_cluster_name("load_report"); - loadstats_config->set_transport_api_version(apiVersion()); + loadstats_config->set_transport_api_version(envoy::config::core::v3::ApiVersion::V3); auto* load_report_cluster = bootstrap.mutable_static_resources()->add_clusters(); load_report_cluster->MergeFrom(bootstrap.static_resources().clusters()[0]); load_report_cluster->mutable_circuit_breakers()->Clear(); @@ -288,10 +285,8 @@ class LoadStatsIntegrationTest : public Grpc::VersionedGrpcClientIntegrationPara mergeLoadStats(loadstats_request, local_loadstats_request); EXPECT_EQ("POST", loadstats_stream_->headers().getMethodValue()); - EXPECT_EQ( - TestUtility::getVersionedMethodPath("envoy.service.load_stats.{}.LoadReportingService", - "StreamLoadStats", apiVersion()), - loadstats_stream_->headers().getPathValue()); + EXPECT_EQ("/envoy.service.load_stats.v3.LoadReportingService/StreamLoadStats", + loadstats_stream_->headers().getPathValue()); EXPECT_EQ("application/grpc", loadstats_stream_->headers().getContentTypeValue()); if (!bound.withinBound()) { return TestUtility::assertRepeatedPtrFieldEqual(expected_cluster_stats, @@ -390,13 +385,12 @@ class LoadStatsIntegrationTest : public Grpc::VersionedGrpcClientIntegrationPara }; INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, LoadStatsIntegrationTest, - VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS, - Grpc::VersionedGrpcClientIntegrationParamTest::protocolTestParamsToString); + GRPC_CLIENT_INTEGRATION_PARAMS, + Grpc::GrpcClientIntegrationParamTest::protocolTestParamsToString); // Validate the load reports for successful requests as cluster membership // changes. 
TEST_P(LoadStatsIntegrationTest, Success) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; initialize(); waitForLoadStatsStream(); @@ -504,7 +498,6 @@ TEST_P(LoadStatsIntegrationTest, Success) { // weighted LB. This serves as a de facto integration test for locality weighted // LB. TEST_P(LoadStatsIntegrationTest, LocalityWeighted) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; locality_weighted_lb_ = true; initialize(); @@ -540,7 +533,6 @@ TEST_P(LoadStatsIntegrationTest, LocalityWeighted) { // Validate the load reports for requests when all endpoints are non-local. TEST_P(LoadStatsIntegrationTest, NoLocalLocality) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; sub_zone_ = "summer"; initialize(); @@ -575,7 +567,6 @@ TEST_P(LoadStatsIntegrationTest, NoLocalLocality) { // Validate the load reports for successful/error requests make sense. TEST_P(LoadStatsIntegrationTest, Error) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; initialize(); waitForLoadStatsStream(); @@ -602,7 +593,6 @@ TEST_P(LoadStatsIntegrationTest, Error) { // Validate the load reports for in-progress make sense. TEST_P(LoadStatsIntegrationTest, InProgress) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; initialize(); waitForLoadStatsStream(); @@ -626,7 +616,6 @@ TEST_P(LoadStatsIntegrationTest, InProgress) { // Validate the load reports for dropped requests make sense. 
TEST_P(LoadStatsIntegrationTest, Dropped) { - XDS_DEPRECATED_FEATURE_TEST_SKIP; config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { auto* cluster_0 = bootstrap.mutable_static_resources()->mutable_clusters(0); auto* thresholds = cluster_0->mutable_circuit_breakers()->add_thresholds(); diff --git a/test/integration/local_reply_integration_test.cc b/test/integration/local_reply_integration_test.cc index 3e8f7af5338a0..6207651326677 100644 --- a/test/integration/local_reply_integration_test.cc +++ b/test/integration/local_reply_integration_test.cc @@ -197,6 +197,57 @@ TEST_P(LocalReplyIntegrationTest, MapStatusCodeAndFormatToJson4Grpc) { expected_grpc_message)); } +// Like MapStatusCodeAndFormatToJson4Grpc, but to non-json format. +// When grpc is plain text, the grpc-message should remains the same and envoy +// should not truncate the trailing '\n' characters. +TEST_P(LocalReplyIntegrationTest, MapStatusCodeAndFormat2Text4Grpc) { + const std::string yaml = R"EOF( +body_format: + text_format_source: + inline_string: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" +)EOF"; + setLocalReplyConfig(yaml); + initialize(); + + // Note: there should be an %0A at the end. + const std::string expected_grpc_message = + "upstream connect error or disconnect/reset before headers. 
reset reason:" + " connection termination:503:path=/package.service/method%0A"; + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto encoder_decoder = codec_client_->startRequest( + Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":path", "/package.service/method"}, + {":scheme", "http"}, + {":authority", "host"}, + {"content-type", "application/grpc"}}); + auto response = std::move(encoder_decoder.second); + + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); + ASSERT_TRUE(fake_upstream_connection_->close()); + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); + ASSERT_TRUE(response->waitForEndStream()); + + if (downstream_protocol_ == Http::CodecType::HTTP1) { + ASSERT_TRUE(codec_client_->waitForDisconnect()); + } else { + codec_client_->close(); + } + + EXPECT_FALSE(upstream_request_->complete()); + EXPECT_EQ(0U, upstream_request_->bodyLength()); + + EXPECT_TRUE(response->complete()); + EXPECT_EQ("application/grpc", response->headers().ContentType()->value().getStringView()); + EXPECT_EQ("14", response->headers().GrpcStatus()->value().getStringView()); + // Check if grpc-message value is same as expected + EXPECT_EQ(std::string(response->headers().GrpcMessage()->value().getStringView()), + expected_grpc_message); +} + // Matched second filter has code, headers and body rewrite and its format TEST_P(LocalReplyIntegrationTest, MapStatusCodeAndFormatToJsonForFirstMatchingFilter) { const std::string yaml = R"EOF( diff --git a/test/integration/multiplexed_integration_test.cc b/test/integration/multiplexed_integration_test.cc index cf9980620f788..c55fc1dabfbeb 100644 --- a/test/integration/multiplexed_integration_test.cc +++ b/test/integration/multiplexed_integration_test.cc @@ -3,6 +3,10 @@ #include #include +#ifdef ENVOY_ENABLE_QUIC 
+#include "source/common/quic/client_connection_factory_impl.h" +#endif + #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" @@ -123,7 +127,6 @@ TEST_P(Http2IntegrationTest, LargeRequestTrailersRejected) { testLargeRequestTra // Verify downstream codec stream flush timeout. TEST_P(Http2IntegrationTest, CodecStreamIdleTimeout) { - EXCLUDE_DOWNSTREAM_HTTP3; // Need to support stream_idle_timeout. config_helper_.setBufferLimits(1024, 1024); config_helper_.addConfigModifier( [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& @@ -133,16 +136,29 @@ TEST_P(Http2IntegrationTest, CodecStreamIdleTimeout) { hcm.mutable_stream_idle_timeout()->set_nanos(IdleTimeoutMs * 1000 * 1000); }); initialize(); + const size_t stream_flow_control_window = + downstream_protocol_ == Http::CodecType::HTTP3 ? 32 * 1024 : 65535; envoy::config::core::v3::Http2ProtocolOptions http2_options = ::Envoy::Http2::Utility::initializeAndValidateOptions( envoy::config::core::v3::Http2ProtocolOptions()); - http2_options.mutable_initial_stream_window_size()->set_value(65535); + http2_options.mutable_initial_stream_window_size()->set_value(stream_flow_control_window); +#ifdef ENVOY_ENABLE_QUIC + if (downstream_protocol_ == Http::CodecType::HTTP3) { + dynamic_cast(*quic_connection_persistent_info_) + .quic_config_.SetInitialStreamFlowControlWindowToSend(stream_flow_control_window); + dynamic_cast(*quic_connection_persistent_info_) + .quic_config_.SetInitialSessionFlowControlWindowToSend(stream_flow_control_window); + } +#endif codec_client_ = makeRawHttpConnection(makeClientConnection(lookupPort("http")), http2_options); auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); waitForNextUpstreamRequest(); upstream_request_->encodeHeaders(default_response_headers_, false); - 
upstream_request_->encodeData(70000, true); - test_server_->waitForCounterEq("http2.tx_flush_timeout", 1); + upstream_request_->encodeData(stream_flow_control_window + 2000, true); + std::string flush_timeout_counter(downstreamProtocol() == Http::CodecType::HTTP3 + ? "http3.tx_flush_timeout" + : "http2.tx_flush_timeout"); + test_server_->waitForCounterEq(flush_timeout_counter, 1); ASSERT_TRUE(response->waitForReset()); } @@ -1655,6 +1671,35 @@ TEST_P(Http2FrameIntegrationTest, SetDetailsTwice) { EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("too_many_headers")); } +TEST_P(Http2FrameIntegrationTest, UpstreamSettingsMaxStreamsAfterGoAway) { + beginSession(); + FakeRawConnectionPtr fake_upstream_connection; + + const uint32_t client_stream_idx = 1; + // Start a request request and wait for it to reach the upstream. + sendFrame(Http2Frame::makePostRequest(client_stream_idx, "host", "/path/to/long/url")); + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); + const Http2Frame settings_frame = Http2Frame::makeEmptySettingsFrame(); + ASSERT_TRUE(fake_upstream_connection->write(std::string(settings_frame))); + test_server_->waitForGaugeEq("cluster.cluster_0.upstream_rq_active", 1); + + // Send RST_STREAM, GOAWAY and SETTINGS(0 max streams) + const Http2Frame rst_stream = + Http2Frame::makeResetStreamFrame(client_stream_idx, Http2Frame::ErrorCode::FlowControlError); + const Http2Frame go_away_frame = + Http2Frame::makeEmptyGoAwayFrame(12345, Http2Frame::ErrorCode::NoError); + const Http2Frame settings_max_connections_frame = Http2Frame::makeSettingsFrame( + Http2Frame::SettingsFlags::None, {{NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, 0}}); + ; + ASSERT_TRUE(fake_upstream_connection->write(std::string(rst_stream) + std::string(go_away_frame) + + std::string(settings_max_connections_frame))); + + test_server_->waitForCounterGe("cluster.cluster_0.upstream_cx_close_notify", 1); + + // Cleanup. 
+ tcp_client_->close(); +} + INSTANTIATE_TEST_SUITE_P(IpVersions, Http2FrameIntegrationTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); @@ -1817,4 +1862,24 @@ TEST_P(Http2IntegrationTest, OnLocalReply) { } } +TEST_P(Http2IntegrationTest, InvalidTrailers) { + useAccessLog("%RESPONSE_CODE_DETAILS%"); + autonomous_upstream_ = true; + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + + // Start the request. + auto encoder_decoder = codec_client_->startRequest(default_request_headers_); + auto response = std::move(encoder_decoder.second); + request_encoder_ = &encoder_decoder.first; + + std::string value = std::string(1, 2); + EXPECT_FALSE(Http::HeaderUtility::headerValueIsValid(value)); + codec_client_->sendTrailers(*request_encoder_, + Http::TestRequestTrailerMapImpl{{"trailer", value}}); + ASSERT_TRUE(response->waitForReset()); + // http2.invalid.header.field or http3.invalid_header_field + EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("invalid")); +} + } // namespace Envoy diff --git a/test/integration/overload_integration_test.cc b/test/integration/overload_integration_test.cc index b9dfd3b6629cf..a27f99e4b421b 100644 --- a/test/integration/overload_integration_test.cc +++ b/test/integration/overload_integration_test.cc @@ -2,13 +2,10 @@ #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/overload/v3/overload.pb.h" -#include "envoy/server/resource_monitor.h" -#include "envoy/server/resource_monitor_config.h" -#include "test/common/config/dummy_config.pb.h" +#include "test/integration/base_overload_integration_test.h" #include "test/integration/http_protocol_integration.h" #include "test/integration/ssl_utility.h" -#include "test/test_common/registry.h" #include "absl/strings/str_cat.h" @@ -18,100 +15,18 @@ namespace Envoy { using testing::HasSubstr; -class FakeResourceMonitorFactory; - -class FakeResourceMonitor : public Server::ResourceMonitor { -public: 
- FakeResourceMonitor(Event::Dispatcher& dispatcher, FakeResourceMonitorFactory& factory) - : dispatcher_(dispatcher), factory_(factory), pressure_(0.0) {} - ~FakeResourceMonitor() override; - void updateResourceUsage(Callbacks& callbacks) override; - - void setResourcePressure(double pressure) { - dispatcher_.post([this, pressure] { pressure_ = pressure; }); - } - -private: - Event::Dispatcher& dispatcher_; - FakeResourceMonitorFactory& factory_; - double pressure_; -}; - -class FakeResourceMonitorFactory : public Server::Configuration::ResourceMonitorFactory { -public: - FakeResourceMonitor* monitor() const { return monitor_; } - Server::ResourceMonitorPtr - createResourceMonitor(const Protobuf::Message& config, - Server::Configuration::ResourceMonitorFactoryContext& context) override; - - ProtobufTypes::MessagePtr createEmptyConfigProto() override { - return std::make_unique(); - } - - std::string name() const override { - return "envoy.resource_monitors.testonly.fake_resource_monitor"; - } - - void onMonitorDestroyed(FakeResourceMonitor* monitor); - -private: - FakeResourceMonitor* monitor_{nullptr}; -}; - -FakeResourceMonitor::~FakeResourceMonitor() { factory_.onMonitorDestroyed(this); } - -void FakeResourceMonitor::updateResourceUsage(Callbacks& callbacks) { - Server::ResourceUsage usage; - usage.resource_pressure_ = pressure_; - callbacks.onSuccess(usage); -} - -void FakeResourceMonitorFactory::onMonitorDestroyed(FakeResourceMonitor* monitor) { - ASSERT(monitor_ == monitor); - monitor_ = nullptr; -} - -Server::ResourceMonitorPtr FakeResourceMonitorFactory::createResourceMonitor( - const Protobuf::Message&, Server::Configuration::ResourceMonitorFactoryContext& context) { - auto monitor = std::make_unique(context.dispatcher(), *this); - monitor_ = monitor.get(); - return monitor; -} - -class OverloadIntegrationTest : public HttpProtocolIntegrationTest { +class OverloadIntegrationTest : public BaseOverloadIntegrationTest, + public HttpProtocolIntegrationTest { 
protected: void initializeOverloadManager(const envoy::config::overload::v3::OverloadAction& overload_action) { - const std::string overload_config = R"EOF( - refresh_interval: - seconds: 0 - nanos: 1000000 - resource_monitors: - - name: "envoy.resource_monitors.testonly.fake_resource_monitor" - typed_config: - "@type": type.googleapis.com/google.protobuf.Empty - )EOF"; - envoy::config::overload::v3::OverloadManager overload_manager_config = - TestUtility::parseYaml(overload_config); - *overload_manager_config.add_actions() = overload_action; - - config_helper_.addConfigModifier( - [overload_manager_config](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { - *bootstrap.mutable_overload_manager() = overload_manager_config; - }); + setupOverloadManagerConfig(overload_action); + config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + *bootstrap.mutable_overload_manager() = this->overload_manager_config_; + }); initialize(); updateResource(0); } - - void updateResource(double pressure) { - auto* monitor = fake_resource_monitor_factory_.monitor(); - ASSERT(monitor != nullptr); - monitor->setResourcePressure(pressure); - } - - FakeResourceMonitorFactory fake_resource_monitor_factory_; - Registry::InjectFactory inject_factory_{ - fake_resource_monitor_factory_}; }; INSTANTIATE_TEST_SUITE_P(Protocols, OverloadIntegrationTest, diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index 8a31ce5dda486..2b1bfb5485efe 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -98,6 +98,32 @@ TEST_P(DownstreamProtocolIntegrationTest, RouterClusterNotFound404) { EXPECT_EQ("404", response->headers().getStatusValue()); } +TEST_P(DownstreamProtocolIntegrationTest, TestHostWhitespacee) { + config_helper_.addConfigModifier(&setDoNotValidateRouteConfig); + auto host = config_helper_.createVirtualHost("foo.com", "/unknown", 
"unknown_cluster"); + host.mutable_routes(0)->mutable_route()->set_cluster_not_found_response_code( + envoy::config::route::v3::RouteAction::NOT_FOUND); + config_helper_.addVirtualHost(host); + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + auto encoder_decoder = codec_client_->startRequest(Http::TestRequestHeaderMapImpl{ + {":method", "GET"}, {":authority", " foo.com "}, {":path", "/unknown"}}); + request_encoder_ = &encoder_decoder.first; + auto response = std::move(encoder_decoder.second); + + // For HTTP/1 the whitespace will be stripped, and 404 returned as above. + if (downstreamProtocol() == Http::CodecType::HTTP1) { + ASSERT_TRUE(response->waitForEndStream()); + EXPECT_EQ("404", response->headers().getStatusValue()); + EXPECT_TRUE(response->complete()); + } else { + // For HTTP/2 and above, the whitespace is illegal. + ASSERT_TRUE(response->waitForReset()); + ASSERT_TRUE(codec_client_->waitForDisconnect()); + } +} + // Add a route that uses unknown cluster (expect 503 Service Unavailable). TEST_P(DownstreamProtocolIntegrationTest, RouterClusterNotFound503) { config_helper_.addConfigModifier(&setDoNotValidateRouteConfig); @@ -261,31 +287,6 @@ TEST_P(ProtocolIntegrationTest, ContinueHeadersOnlyInjectBodyFilter) { EXPECT_EQ(response->body(), "body"); } -// Tests a filter that returns a FilterHeadersStatus::Continue after a local reply. In debug mode, -// this fails on ENVOY_BUG. In opt mode, the status is corrected and the failure is logged. -TEST_P(DownstreamProtocolIntegrationTest, ContinueAfterLocalReply) { - config_helper_.addFilter(R"EOF( - name: continue-after-local-reply-filter - typed_config: - "@type": type.googleapis.com/google.protobuf.Empty - )EOF"); - initialize(); - - codec_client_ = makeHttpConnection(lookupPort("http")); - - // Send a headers only request. 
- IntegrationStreamDecoderPtr response; - EXPECT_ENVOY_BUG( - { - response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); - ASSERT_TRUE(response->waitForEndStream()); - EXPECT_TRUE(response->complete()); - EXPECT_EQ("200", response->headers().getStatusValue()); - }, - "envoy bug failure: !continue_iteration || !state_.local_complete_. " - "Details: Filter did not return StopAll or StopIteration after sending a local reply."); -} - TEST_P(ProtocolIntegrationTest, AddEncodedTrailers) { config_helper_.addFilter(R"EOF( name: add-trailers-filter @@ -1299,14 +1300,149 @@ TEST_P(ProtocolIntegrationTest, TwoRequests) { testTwoRequests(); } TEST_P(ProtocolIntegrationTest, TwoRequestsWithForcedBackup) { testTwoRequests(true); } -TEST_P(ProtocolIntegrationTest, BasicMaxStreamDuration) { testMaxStreamDuration(); } +TEST_P(ProtocolIntegrationTest, BasicMaxStreamDuration) { + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + ConfigHelper::HttpProtocolOptions protocol_options; + auto* http_protocol_options = protocol_options.mutable_common_http_protocol_options(); + http_protocol_options->mutable_max_stream_duration()->MergeFrom( + ProtobufUtil::TimeUtil::MillisecondsToDuration(200)); + ConfigHelper::setProtocolOptions(*bootstrap.mutable_static_resources()->mutable_clusters(0), + protocol_options); + }); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto encoder_decoder = codec_client_->startRequest(default_request_headers_); + request_encoder_ = &encoder_decoder.first; + auto response = std::move(encoder_decoder.second); + + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + + test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_max_duration_reached", 1); + + if (downstream_protocol_ == Http::CodecType::HTTP1) { + 
ASSERT_TRUE(codec_client_->waitForDisconnect()); + ASSERT_TRUE(response->complete()); + } else { + ASSERT_TRUE(response->waitForEndStream()); + codec_client_->close(); + } +} + +TEST_P(ProtocolIntegrationTest, BasicDynamicMaxStreamDuration) { + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + + default_request_headers_.setEnvoyUpstreamStreamDurationMs(500); + auto encoder_decoder = codec_client_->startRequest(default_request_headers_); + request_encoder_ = &encoder_decoder.first; + auto response = std::move(encoder_decoder.second); + + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + + test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_max_duration_reached", 1); + + if (downstream_protocol_ == Http::CodecType::HTTP1) { + ASSERT_TRUE(codec_client_->waitForDisconnect()); + ASSERT_TRUE(response->complete()); + } else { + ASSERT_TRUE(response->waitForEndStream()); + codec_client_->close(); + } +} TEST_P(ProtocolIntegrationTest, MaxStreamDurationWithRetryPolicy) { - testMaxStreamDurationWithRetry(false); + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + ConfigHelper::HttpProtocolOptions protocol_options; + auto* http_protocol_options = protocol_options.mutable_common_http_protocol_options(); + http_protocol_options->mutable_max_stream_duration()->MergeFrom( + ProtobufUtil::TimeUtil::MillisecondsToDuration(1000)); + ConfigHelper::setProtocolOptions(*bootstrap.mutable_static_resources()->mutable_clusters(0), + protocol_options); + }); + Http::TestRequestHeaderMapImpl retriable_header = Http::TestRequestHeaderMapImpl{ + {":method", "POST"}, {":path", "/test/long/url"}, {":scheme", "http"}, + {":authority", "host"}, {"x-forwarded-for", "10.0.0.1"}, {"x-envoy-retry-on", "5xx"}}; + + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + 
+ auto encoder_decoder = codec_client_->startRequest(retriable_header); + request_encoder_ = &encoder_decoder.first; + auto response = std::move(encoder_decoder.second); + + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); + + if (fake_upstreams_[0]->httpType() == FakeHttpConnection::Type::HTTP1) { + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + } else { + ASSERT_TRUE(upstream_request_->waitForReset()); + } + + test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_max_duration_reached", 1); + + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + upstream_request_->encodeHeaders(response_headers, true); + + response->waitForHeaders(); + codec_client_->close(); + + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); } TEST_P(ProtocolIntegrationTest, MaxStreamDurationWithRetryPolicyWhenRetryUpstreamDisconnection) { - testMaxStreamDurationWithRetry(true); + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + ConfigHelper::HttpProtocolOptions protocol_options; + auto* http_protocol_options = protocol_options.mutable_common_http_protocol_options(); + http_protocol_options->mutable_max_stream_duration()->MergeFrom( + ProtobufUtil::TimeUtil::MillisecondsToDuration(1000)); + ConfigHelper::setProtocolOptions(*bootstrap.mutable_static_resources()->mutable_clusters(0), + protocol_options); + }); + Http::TestRequestHeaderMapImpl retriable_header = Http::TestRequestHeaderMapImpl{ + {":method", "POST"}, {":path", "/test/long/url"}, {":scheme", "http"}, + {":authority", 
"host"}, {"x-forwarded-for", "10.0.0.1"}, {"x-envoy-retry-on", "5xx"}}; + + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto encoder_decoder = codec_client_->startRequest(retriable_header); + request_encoder_ = &encoder_decoder.first; + auto response = std::move(encoder_decoder.second); + + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); + + if (fake_upstreams_[0]->httpType() == FakeHttpConnection::Type::HTTP1) { + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + } else { + ASSERT_TRUE(upstream_request_->waitForReset()); + } + + test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_max_duration_reached", 1); + + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + + test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_max_duration_reached", 2); + if (downstream_protocol_ == Http::CodecType::HTTP1) { + ASSERT_TRUE(codec_client_->waitForDisconnect()); + ASSERT_TRUE(response->complete()); + } else { + ASSERT_TRUE(response->waitForEndStream()); + codec_client_->close(); + } + + EXPECT_EQ("408", response->headers().getStatusValue()); } // Verify that headers with underscores in their names are dropped from client requests @@ -1825,10 +1961,7 @@ TEST_P(DownstreamProtocolIntegrationTest, ManyRequestTrailersRejected) { config_helper_.addConfigModifier(setEnableUpstreamTrailersHttp1()); Http::TestRequestTrailerMapImpl request_trailers; for (int i = 0; i < 150; i++) { - // TODO(alyssawilk) QUIC fails without the trailers being distinct because - // the checks are done before transformation. Either make the transformation - // use commas, or do QUIC checks before and after. 
- request_trailers.addCopy(absl::StrCat("trailer", i), std::string(1, 'a')); + request_trailers.addCopy("trailer", std::string(1, 'a')); } initialize(); @@ -2729,6 +2862,293 @@ TEST_P(ProtocolIntegrationTest, ReqRespSizeStats) { test_server_->waitUntilHistogramHasSamples("cluster.cluster_0.upstream_rs_headers_size"); } +// Verify that when a filter encodeHeaders callback overflows response buffer in filter manager the +// filter chain is aborted and 500 is sent to the client. +TEST_P(ProtocolIntegrationTest, OverflowEncoderBufferFromEncodeHeaders) { + config_helper_.setBufferLimits(64 * 1024, 64 * 1024); + config_helper_.addFilter(R"EOF( + name: add-body-filter + typed_config: + "@type": type.googleapis.com/test.integration.filters.AddBodyFilterConfig + where_to_add_body: ENCODE_HEADERS + body_size: 70000 + )EOF"); + config_helper_.addFilter(R"EOF( + name: crash-filter + typed_config: + "@type": type.googleapis.com/test.integration.filters.CrashFilterConfig + crash_in_encode_headers: false + crash_in_encode_data: false + )EOF"); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + auto response = sendRequestAndWaitForResponse(default_request_headers_, 0, response_headers, 10, + 0, TestUtility::DefaultTimeout); + ASSERT_TRUE(response->waitForEndStream()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("500", response->headers().getStatusValue()); +} + +// Verify that when a filter encodeData callback overflows response buffer in filter manager the +// filter chain is aborted and 500 is sent to the client in case where upstream response headers +// have not yet been sent. +TEST_P(ProtocolIntegrationTest, OverflowEncoderBufferFromEncodeDataWithResponseHeadersUnsent) { + config_helper_.setBufferLimits(64 * 1024, 64 * 1024); + // Buffer filter will stop iteration from encodeHeaders preventing response headers from being + // sent downstream. 
+ config_helper_.addFilter(R"EOF( + name: encoder-decoder-buffer-filter + )EOF"); + config_helper_.addFilter(R"EOF( + name: crash-filter + typed_config: + "@type": type.googleapis.com/test.integration.filters.CrashFilterConfig + crash_in_encode_headers: true + crash_in_encode_data: true + )EOF"); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + + IntegrationStreamDecoderPtr response = + codec_client_->makeHeaderOnlyRequest(default_request_headers_); + waitForNextUpstreamRequest(0); + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + upstream_request_->encodeHeaders(response_headers, false); + // This much data should overflow the 64Kb response buffer. + upstream_request_->encodeData(16 * 1024, false); + upstream_request_->encodeData(64 * 1024, false); + ASSERT_TRUE(response->waitForEndStream()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("500", response->headers().getStatusValue()); +} + +// Verify that when a filter encodeData callback overflows response buffer in filter manager the +// filter chain is aborted and stream is reset in case where upstream response headers have already +// been sent. +TEST_P(ProtocolIntegrationTest, OverflowEncoderBufferFromEncodeData) { + config_helper_.setBufferLimits(64 * 1024, 64 * 1024); + // Make the add-body-filter stop iteration from encodeData. Headers should be sent to the client. 
+ config_helper_.addFilter(R"EOF( + name: add-body-filter + typed_config: + "@type": type.googleapis.com/test.integration.filters.AddBodyFilterConfig + where_to_add_body: ENCODE_DATA + where_to_stop_and_buffer: ENCODE_DATA + body_size: 16384 + )EOF"); + config_helper_.addFilter(R"EOF( + name: crash-filter + typed_config: + "@type": type.googleapis.com/test.integration.filters.CrashFilterConfig + crash_in_encode_headers: false + crash_in_encode_data: true + )EOF"); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + + IntegrationStreamDecoderPtr response = + codec_client_->makeHeaderOnlyRequest(default_request_headers_); + waitForNextUpstreamRequest(0); + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + upstream_request_->encodeHeaders(response_headers, false); + // This much data should cause the add-body-filter to overflow response buffer + upstream_request_->encodeData(16 * 1024, false); + upstream_request_->encodeData(64 * 1024, false); + ASSERT_TRUE(response->waitForReset()); + EXPECT_FALSE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); +} + +// Verify that when a filter decodeHeaders callback overflows request buffer in filter manager the +// filter chain is aborted and 413 is sent to the client. 
+TEST_P(DownstreamProtocolIntegrationTest, OverflowDecoderBufferFromDecodeHeaders) { + config_helper_.setBufferLimits(64 * 1024, 64 * 1024); + config_helper_.addFilter(R"EOF( + name: crash-filter + typed_config: + "@type": type.googleapis.com/test.integration.filters.CrashFilterConfig + crash_in_decode_headers: true + )EOF"); + config_helper_.addFilter(R"EOF( + name: add-body-filter + typed_config: + "@type": type.googleapis.com/test.integration.filters.AddBodyFilterConfig + where_to_add_body: DECODE_HEADERS + body_size: 70000 + )EOF"); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + ASSERT_TRUE(response->waitForEndStream()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("413", response->headers().getStatusValue()); +} + +// Verify that when a filter decodeData callback overflows request buffer in filter manager the +// filter chain is aborted and 413 is sent to the client. 
+TEST_P(DownstreamProtocolIntegrationTest, OverflowDecoderBufferFromDecodeData) { + config_helper_.setBufferLimits(64 * 1024, 64 * 1024); + config_helper_.addFilter(R"EOF( + name: crash-filter + typed_config: + "@type": type.googleapis.com/test.integration.filters.CrashFilterConfig + crash_in_decode_headers: true + crash_in_decode_data: true + )EOF"); + // Buffer filter causes filter manager to buffer data + config_helper_.addFilter(R"EOF( + name: encoder-decoder-buffer-filter + )EOF"); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + auto encoder_decoder = + codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":scheme", "http"}, + {":path", "/test/long/url"}, + {":authority", "host"}}); + auto request_encoder = &encoder_decoder.first; + auto response = std::move(encoder_decoder.second); + + // This much data should overflow request buffer in filter manager + codec_client_->sendData(*request_encoder, 16 * 1024, false); + codec_client_->sendData(*request_encoder, 64 * 1024, false); + + ASSERT_TRUE(response->waitForEndStream()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("413", response->headers().getStatusValue()); +} + +// Verify that when a filter decodeData callback overflows request buffer in filter manager the +// filter chain is aborted and 413 is sent to the client. In this test the overflow occurs after +// filter chain iteration was restarted. It is very similar to the test case above but some filter +// manager's internal state is slightly different. 
+TEST_P(DownstreamProtocolIntegrationTest, OverflowDecoderBufferFromDecodeDataContinueIteration) { + config_helper_.setBufferLimits(64 * 1024, 64 * 1024); + config_helper_.addFilter(R"EOF( + name: crash-filter + typed_config: + "@type": type.googleapis.com/test.integration.filters.CrashFilterConfig + crash_in_decode_headers: false + crash_in_decode_data: true + )EOF"); + config_helper_.addFilter(R"EOF( + name: add-body-filter + typed_config: + "@type": type.googleapis.com/test.integration.filters.AddBodyFilterConfig + where_to_add_body: DECODE_DATA + body_size: 70000 + )EOF"); + config_helper_.addFilter(R"EOF( + name: encoder-decoder-buffer-filter + )EOF"); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + auto encoder_decoder = + codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":scheme", "http"}, + {":path", "/test/long/url"}, + {":authority", "host"}}); + auto request_encoder = &encoder_decoder.first; + auto response = std::move(encoder_decoder.second); + + // This should cause some data to be buffered without overflowing request buffer. + codec_client_->sendData(*request_encoder, 16 * 1024, false); + // The buffer filter will resume filter chain iteration and the next add-body-filter filter + // will overflow the request buffer. + codec_client_->sendData(*request_encoder, 16 * 1024, true); + + ASSERT_TRUE(response->waitForEndStream()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("413", response->headers().getStatusValue()); +} + +// Adding data in decodeTrailers without any data in the filter manager's request buffer should work +// as it will overflow the pending_recv_data_ which will cause downstream window updates to stop. 
+TEST_P(DownstreamProtocolIntegrationTest, + OverflowDecoderBufferFromDecodeTrailersWithContinuedIteration) { + if (downstreamProtocol() == Http::CodecType::HTTP1) { + return; + } + config_helper_.setBufferLimits(64 * 1024, 64 * 1024); + config_helper_.addFilter(R"EOF( + name: add-body-filter + typed_config: + "@type": type.googleapis.com/test.integration.filters.AddBodyFilterConfig + where_to_add_body: DECODE_TRAILERS + body_size: 70000 + )EOF"); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + auto encoder_decoder = + codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":scheme", "http"}, + {":path", "/test/long/url"}, + {":authority", "host"}}); + auto request_encoder = &encoder_decoder.first; + auto response = std::move(encoder_decoder.second); + + codec_client_->sendData(*request_encoder, 1024, false); + codec_client_->sendData(*request_encoder, 1024, false); + + codec_client_->sendTrailers(*request_encoder, + Http::TestRequestTrailerMapImpl{{"some", "trailer"}}); + + waitForNextUpstreamRequest(); + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); + ASSERT_TRUE(response->waitForEndStream()); + + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); +} + +// Adding data in decodeTrailers with some data in the filter manager's request buffer should case +// 413 as it will overflow the request buffer in filter manager. 
+TEST_P(DownstreamProtocolIntegrationTest, OverflowDecoderBufferFromDecodeTrailers) { + if (downstreamProtocol() == Http::CodecType::HTTP1) { + return; + } + config_helper_.setBufferLimits(64 * 1024, 64 * 1024); + config_helper_.addFilter(R"EOF( + name: crash-filter + typed_config: + "@type": type.googleapis.com/test.integration.filters.CrashFilterConfig + crash_in_decode_headers: false + crash_in_decode_data: true + crash_in_decode_trailers: true + )EOF"); + config_helper_.addFilter(R"EOF( + name: add-body-filter + typed_config: + "@type": type.googleapis.com/test.integration.filters.AddBodyFilterConfig + where_to_add_body: DECODE_TRAILERS + where_to_stop_and_buffer: DECODE_DATA + body_size: 70000 + )EOF"); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + auto encoder_decoder = + codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":scheme", "http"}, + {":path", "/test/long/url"}, + {":authority", "host"}}); + auto request_encoder = &encoder_decoder.first; + auto response = std::move(encoder_decoder.second); + + codec_client_->sendData(*request_encoder, 1024, false); + codec_client_->sendData(*request_encoder, 1024, false); + + codec_client_->sendTrailers(*request_encoder, + Http::TestRequestTrailerMapImpl{{"some", "trailer"}}); + + ASSERT_TRUE(response->waitForEndStream()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("413", response->headers().getStatusValue()); +} + TEST_P(ProtocolIntegrationTest, ResetLargeResponseUponReceivingHeaders) { if (downstreamProtocol() == Http::CodecType::HTTP1) { return; @@ -2765,4 +3185,38 @@ TEST_P(ProtocolIntegrationTest, ResetLargeResponseUponReceivingHeaders) { codec_client_->close(); } +TEST_P(DownstreamProtocolIntegrationTest, PathWithFragmentRejectedByDefault) { + initialize(); + + codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); + Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, + {":path", "/some/path#fragment"}, 
+ {":scheme", "http"}, + {":authority", "foo.com"}}; + IntegrationStreamDecoderPtr response = codec_client_->makeRequestWithBody(request_headers, 10); + ASSERT_TRUE(response->waitForEndStream()); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("400", response->headers().getStatusValue()); +} + +TEST_P(ProtocolIntegrationTest, FragmentStrippedFromPathWithOverride) { + config_helper_.addRuntimeOverride("envoy.reloadable_features.http_reject_path_with_fragment", + "false"); + initialize(); + + codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); + Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, + {":path", "/some/path?p1=v1#fragment"}, + {":scheme", "http"}, + {":authority", "foo.com"}}; + Http::TestRequestHeaderMapImpl expected_request_headers{request_headers}; + expected_request_headers.setPath("/some/path?p1=v1"); + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + auto response = sendRequestAndWaitForResponse(expected_request_headers, 0, response_headers, 0, 0, + TestUtility::DefaultTimeout); + EXPECT_TRUE(upstream_request_->complete()); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); +} + } // namespace Envoy diff --git a/test/integration/proxy_proto_integration_test.cc b/test/integration/proxy_proto_integration_test.cc index 118f48296f878..4d48a8aee68ed 100644 --- a/test/integration/proxy_proto_integration_test.cc +++ b/test/integration/proxy_proto_integration_test.cc @@ -314,30 +314,33 @@ ProxyProtoFilterChainMatchIntegrationTest::ProxyProtoFilterChainMatchIntegration }); } +void ProxyProtoFilterChainMatchIntegrationTest::send(const std::string& data) { + initialize(); + + // Set verify to false because it is expected that Envoy will immediately disconnect after + // receiving the PROXY header, and it is a race whether the `write()` will fail due to + // disconnect, or finish the write before receiving the disconnect. 
+ constexpr bool verify = false; + + IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); + ASSERT_TRUE(tcp_client->write(data, false, verify)); + tcp_client->waitForDisconnect(); +} + INSTANTIATE_TEST_SUITE_P(IpVersions, ProxyProtoFilterChainMatchIntegrationTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); // Validate that source IP and direct source IP match correctly. TEST_P(ProxyProtoFilterChainMatchIntegrationTest, MatchDirectSourceAndSource) { - initialize(); - - IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); - ASSERT_TRUE(tcp_client->write("PROXY TCP4 1.2.3.4 254.254.254.254 12345 1234\r\nhello", false)); - tcp_client->waitForDisconnect(); - + send("PROXY TCP4 1.2.3.4 254.254.254.254 12345 1234\r\nhello"); EXPECT_THAT(waitForAccessLog(listener_access_log_name_), testing::HasSubstr("directsource_localhost_and_source_1.2.3.0/24 -")); } // Test that a mismatched direct source prevents matching a filter chain with a matching source. TEST_P(ProxyProtoFilterChainMatchIntegrationTest, MismatchDirectSourceButMatchSource) { - initialize(); - - IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); - ASSERT_TRUE(tcp_client->write("PROXY TCP4 5.5.5.5 254.254.254.254 12345 1234\r\nhello", false)); - tcp_client->waitForDisconnect(); - + send("PROXY TCP4 5.5.5.5 254.254.254.254 12345 1234\r\nhello"); EXPECT_THAT(waitForAccessLog(listener_access_log_name_), testing::HasSubstr( absl::StrCat("- ", StreamInfo::ResponseCodeDetails::get().FilterChainNotFound))); @@ -346,12 +349,7 @@ TEST_P(ProxyProtoFilterChainMatchIntegrationTest, MismatchDirectSourceButMatchSo // Test that a more specific direct source match prevents matching a filter chain with a less // specific direct source match but matching source. 
TEST_P(ProxyProtoFilterChainMatchIntegrationTest, MoreSpecificDirectSource) { - initialize(); - - IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); - ASSERT_TRUE(tcp_client->write("PROXY TCP4 6.6.6.6 254.254.254.254 12345 1234\r\nhello", false)); - tcp_client->waitForDisconnect(); - + send("PROXY TCP4 6.6.6.6 254.254.254.254 12345 1234\r\nhello"); EXPECT_THAT(waitForAccessLog(listener_access_log_name_), testing::HasSubstr( absl::StrCat("- ", StreamInfo::ResponseCodeDetails::get().FilterChainNotFound))); diff --git a/test/integration/proxy_proto_integration_test.h b/test/integration/proxy_proto_integration_test.h index 224b6b168eb82..bdc45933b7ffe 100644 --- a/test/integration/proxy_proto_integration_test.h +++ b/test/integration/proxy_proto_integration_test.h @@ -27,6 +27,8 @@ class ProxyProtoTcpIntegrationTest : public testing::TestWithParam { public: QuicHttpIntegrationTest() - : HttpIntegrationTest(Http::CodecType::HTTP3, GetParam().first, + : HttpIntegrationTest(Http::CodecType::HTTP3, GetParam(), ConfigHelper::quicHttpProxyConfig()), - supported_versions_([]() { - if (GetParam().second == QuicVersionType::GquicQuicCrypto) { - return quic::CurrentSupportedVersionsWithQuicCrypto(); - } - bool use_http3 = GetParam().second == QuicVersionType::Iquic; - SetQuicReloadableFlag(quic_disable_version_draft_29, !use_http3); - SetQuicReloadableFlag(quic_disable_version_rfcv1, !use_http3); - return quic::CurrentSupportedVersions(); - }()), - conn_helper_(*dispatcher_), alarm_factory_(*dispatcher_, *conn_helper_.GetClock()) {} + supported_versions_(quic::CurrentSupportedHttp3Versions()), conn_helper_(*dispatcher_), + alarm_factory_(*dispatcher_, *conn_helper_.GetClock()) { + // Enable this flag for test coverage. 
+ SetQuicReloadableFlag(quic_tls_set_signature_algorithm_prefs, true); + } ~QuicHttpIntegrationTest() override { cleanupUpstreamAndDownstream(); @@ -172,13 +169,13 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, public QuicMultiVers } constexpr auto timeout_first = std::chrono::seconds(15); constexpr auto timeout_subsequent = std::chrono::milliseconds(10); - if (GetParam().first == Network::Address::IpVersion::v4) { + if (GetParam() == Network::Address::IpVersion::v4) { test_server_->waitForCounterEq("listener.127.0.0.1_0.downstream_cx_total", 8u, timeout_first); } else { test_server_->waitForCounterEq("listener.[__1]_0.downstream_cx_total", 8u, timeout_first); } for (size_t i = 0; i < concurrency_; ++i) { - if (GetParam().first == Network::Address::IpVersion::v4) { + if (GetParam() == Network::Address::IpVersion::v4) { test_server_->waitForGaugeEq( fmt::format("listener.127.0.0.1_0.worker_{}.downstream_cx_active", i), 1u, timeout_subsequent); @@ -230,7 +227,8 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, public QuicMultiVers }; INSTANTIATE_TEST_SUITE_P(QuicHttpIntegrationTests, QuicHttpIntegrationTest, - testing::ValuesIn(generateTestParam()), testParamsToString); + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); TEST_P(QuicHttpIntegrationTest, GetRequestAndEmptyResponse) { testRouterHeaderOnlyRequestAndResponse(); @@ -264,7 +262,7 @@ TEST_P(QuicHttpIntegrationTest, ZeroRtt) { ->EarlyDataAccepted()); // Close the second connection. 
codec_client_->close(); - if (GetParam().first == Network::Address::IpVersion::v4) { + if (GetParam() == Network::Address::IpVersion::v4) { test_server_->waitForCounterEq( "listener.127.0.0.1_0.http3.downstream.rx.quic_connection_close_error_" "code_QUIC_NO_ERROR", @@ -274,13 +272,8 @@ TEST_P(QuicHttpIntegrationTest, ZeroRtt) { "error_code_QUIC_NO_ERROR", 2u); } - if (GetParam().second == QuicVersionType::GquicQuicCrypto) { - test_server_->waitForCounterEq("http3.quic_version_50", 2u); - } else if (GetParam().second == QuicVersionType::GquicTls) { - test_server_->waitForCounterEq("http3.quic_version_51", 2u); - } else { - test_server_->waitForCounterEq("http3.quic_version_rfc_v1", 2u); - } + + test_server_->waitForCounterEq("http3.quic_version_rfc_v1", 2u); } // Ensure multiple quic connections work, regardless of platform BPF support @@ -289,10 +282,14 @@ TEST_P(QuicHttpIntegrationTest, MultipleQuicConnectionsDefaultMode) { } TEST_P(QuicHttpIntegrationTest, MultipleQuicConnectionsNoBPF) { - // Note: This runtime override is a no-op on platforms without BPF - config_helper_.addRuntimeOverride( - "envoy.reloadable_features.prefer_quic_kernel_bpf_packet_routing", "false"); - + // Note: This setting is a no-op on platforms without BPF + class DisableBpf { + public: + DisableBpf() { ActiveQuicListenerFactory::setDisableKernelBpfPacketRoutingForTest(true); } + ~DisableBpf() { ActiveQuicListenerFactory::setDisableKernelBpfPacketRoutingForTest(false); } + }; + + DisableBpf disable; testMultipleQuicConnections(); } @@ -340,6 +337,22 @@ TEST_P(QuicHttpIntegrationTest, PortMigration) { EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(1024u * 2, upstream_request_->bodyLength()); + + // Switch to a socket with bad socket options. 
+ auto option = std::make_shared(); + EXPECT_CALL(*option, setOption(_, _)) + .WillRepeatedly( + Invoke([](Network::Socket&, envoy::config::core::v3::SocketOption::SocketState state) { + if (state == envoy::config::core::v3::SocketOption::STATE_LISTENING) { + return false; + } + return true; + })); + auto options = std::make_shared(); + options->push_back(option); + quic_connection_->switchConnectionSocket( + createConnectionSocket(server_addr_, local_addr, options)); + EXPECT_TRUE(codec_client_->disconnected()); cleanupUpstreamAndDownstream(); } @@ -352,13 +365,9 @@ TEST_P(QuicHttpIntegrationTest, CertVerificationFailure) { initialize(); codec_client_ = makeRawHttpConnection(makeClientConnection((lookupPort("http"))), absl::nullopt); EXPECT_FALSE(codec_client_->connected()); - std::string failure_reason = - GetParam().second == QuicVersionType::GquicQuicCrypto - ? "QUIC_PROOF_INVALID with details: Proof invalid: X509_verify_cert: certificate " - "verification error at depth 0: ok" - : "QUIC_TLS_CERTIFICATE_UNKNOWN with details: TLS handshake failure " - "(ENCRYPTION_HANDSHAKE) 46: " - "certificate unknown"; + std::string failure_reason = "QUIC_TLS_CERTIFICATE_UNKNOWN with details: TLS handshake failure " + "(ENCRYPTION_HANDSHAKE) 46: " + "certificate unknown"; EXPECT_EQ(failure_reason, codec_client_->connection()->transportFailureReason()); } @@ -390,12 +399,10 @@ TEST_P(QuicHttpIntegrationTest, Reset101SwitchProtocolResponse) { EXPECT_FALSE(response->complete()); // Verify stream error counters are correctly incremented. - std::string counter_scope = GetParam().first == Network::Address::IpVersion::v4 + std::string counter_scope = GetParam() == Network::Address::IpVersion::v4 ? "listener.127.0.0.1_0.http3.downstream.rx." : "listener.[__1]_0.http3.downstream.rx."; - std::string error_code = GetParam().second == QuicVersionType::Iquic - ? 
"quic_reset_stream_error_code_QUIC_STREAM_GENERAL_PROTOCOL_ERROR" - : "quic_reset_stream_error_code_QUIC_BAD_APPLICATION_PAYLOAD"; + std::string error_code = "quic_reset_stream_error_code_QUIC_STREAM_GENERAL_PROTOCOL_ERROR"; test_server_->waitForCounterEq(absl::StrCat(counter_scope, error_code), 1U); } @@ -414,5 +421,20 @@ TEST_P(QuicHttpIntegrationTest, ResetRequestWithoutAuthorityHeader) { EXPECT_EQ("400", response->headers().getStatusValue()); } +TEST_P(QuicHttpIntegrationTest, ResetRequestWithInvalidCharacter) { + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + std::string value = std::string(1, 2); + EXPECT_FALSE(Http::HeaderUtility::headerValueIsValid(value)); + default_request_headers_.addCopy("illegal_header", value); + auto encoder_decoder = codec_client_->startRequest(default_request_headers_); + request_encoder_ = &encoder_decoder.first; + auto response = std::move(encoder_decoder.second); + + ASSERT_TRUE(response->waitForReset()); +} + } // namespace Quic } // namespace Envoy diff --git a/test/integration/redirect_integration_test.cc b/test/integration/redirect_integration_test.cc index 68f0193948a56..ff48c8922b704 100644 --- a/test/integration/redirect_integration_test.cc +++ b/test/integration/redirect_integration_test.cc @@ -111,6 +111,24 @@ TEST_P(RedirectIntegrationTest, RedirectNotConfigured) { response->headers().get(test_header_key_)[0]->value().getStringView()); } +// Verify that URI fragment in upstream server Location header is passed unmodified to the +// downstream client. +TEST_P(RedirectIntegrationTest, UpstreamRedirectPreservesURIFragmentInLocation) { + // Use base class initialize. 
+ HttpProtocolIntegrationTest::initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + Http::TestResponseHeaderMapImpl redirect_response{ + {":status", "302"}, + {"content-length", "0"}, + {"location", "http://authority2/new/url?p1=v1&p2=v2#fragment"}}; + auto response = sendRequestAndWaitForResponse(default_request_headers_, 0, redirect_response, 0); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("302", response->headers().getStatusValue()); + EXPECT_EQ("http://authority2/new/url?p1=v1&p2=v2#fragment", + response->headers().getLocationValue()); +} + // Now test a route with redirects configured on in pass-through mode. TEST_P(RedirectIntegrationTest, InternalRedirectPassedThrough) { useAccessLog("%RESPONSE_CODE% %RESPONSE_CODE_DETAILS% %RESP(test-header)%"); @@ -172,6 +190,47 @@ TEST_P(RedirectIntegrationTest, BasicInternalRedirect) { EXPECT_THAT(waitForAccessLog(access_log_name_, 1), HasSubstr("200 via_upstream -\n")); } +TEST_P(RedirectIntegrationTest, InternalRedirectStripsUriFragment) { + // Validate that header sanitization is only called once. 
+ config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { hcm.set_via("via_value"); }); + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + default_request_headers_.setHost("handle.internal.redirect"); + IntegrationStreamDecoderPtr response = + codec_client_->makeHeaderOnlyRequest(default_request_headers_); + + waitForNextUpstreamRequest(); + + // Redirect to URI with fragment + Http::TestResponseHeaderMapImpl redirect_response{ + {":status", "302"}, + {"content-length", "0"}, + {"location", "http://authority2/new/url?p1=v1&p2=v2#fragment"}}; + + upstream_request_->encodeHeaders(redirect_response, true); + + waitForNextUpstreamRequest(); + ASSERT(upstream_request_->headers().EnvoyOriginalUrl() != nullptr); + EXPECT_EQ("http://handle.internal.redirect/test/long/url", + upstream_request_->headers().getEnvoyOriginalUrlValue()); + // During internal redirect Envoy always strips fragment from Location URI + EXPECT_EQ("/new/url?p1=v1&p2=v2", upstream_request_->headers().getPathValue()); + EXPECT_EQ("authority2", upstream_request_->headers().getHostValue()); + EXPECT_EQ("via_value", upstream_request_->headers().getViaValue()); + + upstream_request_->encodeHeaders(default_response_headers_, true); + + ASSERT_TRUE(response->waitForEndStream()); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); + EXPECT_EQ(1, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_succeeded_total") + ->value()); +} + TEST_P(RedirectIntegrationTest, InternalRedirectWithRequestBody) { useAccessLog("%RESPONSE_FLAGS% %RESPONSE_CODE% %RESPONSE_CODE_DETAILS% %RESP(test-header)%"); // Validate that header sanitization is only called once. 
diff --git a/test/integration/rtds_integration_test.cc b/test/integration/rtds_integration_test.cc index 114b24a67957f..98e4489abf586 100644 --- a/test/integration/rtds_integration_test.cc +++ b/test/integration/rtds_integration_test.cc @@ -1,6 +1,7 @@ #include "envoy/service/runtime/v3/rtds.pb.h" #include "test/common/grpc/grpc_client_integration.h" +#include "test/config/v2_link_hacks.h" #include "test/integration/http_integration.h" #include "test/test_common/utility.h" diff --git a/test/integration/scoped_rds_integration_test.cc b/test/integration/scoped_rds_integration_test.cc index 6daa40e230a84..c1a96010e6c57 100644 --- a/test/integration/scoped_rds_integration_test.cc +++ b/test/integration/scoped_rds_integration_test.cc @@ -7,9 +7,9 @@ #include "envoy/service/discovery/v3/discovery.pb.h" #include "source/common/config/api_version.h" -#include "source/common/config/version_converter.h" #include "test/common/grpc/grpc_client_integration.h" +#include "test/config/v2_link_hacks.h" #include "test/integration/http_integration.h" #include "test/test_common/printers.h" #include "test/test_common/resources.h" diff --git a/test/integration/sds_dynamic_integration_test.cc b/test/integration/sds_dynamic_integration_test.cc index 8e7b7ad656f27..8b361628100fe 100644 --- a/test/integration/sds_dynamic_integration_test.cc +++ b/test/integration/sds_dynamic_integration_test.cc @@ -25,11 +25,13 @@ #include "test/common/grpc/grpc_client_integration.h" #include "test/config/integration/certs/clientcert_hash.h" +#include "test/extensions/transport_sockets/tls/test_private_key_method_provider.h" #include "test/integration/http_integration.h" #include "test/integration/server.h" #include "test/integration/ssl_utility.h" #include "test/mocks/secret/mocks.h" #include "test/test_common/network_utility.h" +#include "test/test_common/registry.h" #include "test/test_common/resources.h" #include "test/test_common/test_time_system.h" #include "test/test_common/utility.h" @@ -59,14 
+61,17 @@ std::string sdsTestParamsToString(const ::testing::TestParamInfo& p) p.param.test_quic ? "UsesQuic" : "UsesTcp"); } -std::vector getSdsTestsParams() { +std::vector getSdsTestsParams(bool disable_quic = false) { std::vector ret; for (auto ip_version : TestEnvironment::getIpVersionsForTest()) { for (auto sds_grpc_type : TestEnvironment::getsGrpcVersionsForTest()) { ret.push_back(TestParams{ip_version, sds_grpc_type, false}); #ifdef ENVOY_ENABLE_QUIC - ret.push_back(TestParams{ip_version, sds_grpc_type, true}); + if (!disable_quic) { + ret.push_back(TestParams{ip_version, sds_grpc_type, true}); + } #else + UNREFERENCED_PARAMETER(disable_quic); ENVOY_LOG_MISC(warn, "Skipping HTTP/3 as support is compiled out"); #endif } @@ -975,5 +980,144 @@ TEST_P(SdsCdsIntegrationTest, BasicSuccess) { test_server_->waitForGaugeEq("cluster_manager.active_clusters", 3); } +class SdsDynamicDownstreamPrivateKeyIntegrationTest : public SdsDynamicDownstreamIntegrationTest { +public: + envoy::extensions::transport_sockets::tls::v3::Secret getCurrentServerPrivateKeyProviderSecret() { + envoy::extensions::transport_sockets::tls::v3::Secret secret; + + const std::string yaml = + R"EOF( +name: "abc.com" +tls_certificate: + certificate_chain: + filename: "{{ test_tmpdir }}/root/current/servercert.pem" + private_key_provider: + provider_name: test + typed_config: + "@type": "type.googleapis.com/google.protobuf.Struct" + value: + private_key_file: "{{ test_tmpdir }}/root/current/serverkey.pem" + expected_operation: "sign" + sync_mode: true + mode: "rsa" +)EOF"; + + TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), secret); + secret.set_name(server_cert_rsa_); + + return secret; + } +}; + +INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, SdsDynamicDownstreamPrivateKeyIntegrationTest, + testing::ValuesIn(getSdsTestsParams(true)), sdsTestParamsToString); + +// Validate that a basic SDS updates work with a private key provider. 
+TEST_P(SdsDynamicDownstreamPrivateKeyIntegrationTest, BasicPrivateKeyProvider) { + v3_resource_api_ = true; + + TestEnvironment::exec( + {TestEnvironment::runfilesPath("test/integration/sds_dynamic_key_rotation_setup.sh")}); + + // Set up the private key provider. + Extensions::PrivateKeyMethodProvider::TestPrivateKeyMethodFactory test_factory; + Registry::InjectFactory + test_private_key_method_factory(test_factory); + + on_server_init_function_ = [this]() { + createSdsStream(*(fake_upstreams_[1])); + sendSdsResponse(getCurrentServerPrivateKeyProviderSecret()); + }; + initialize(); + + EXPECT_EQ(1, test_server_->counter("sds.server_cert_rsa.update_success")->value()); + EXPECT_EQ(0, test_server_->counter("sds.server_cert_rsa.update_rejected")->value()); + + ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr { + return makeSslClientConnection(); + }; + testRouterHeaderOnlyRequestAndResponse(&creator); + + cleanupUpstreamAndDownstream(); +} + +class SdsCdsPrivateKeyIntegrationTest : public SdsCdsIntegrationTest { +public: + envoy::extensions::transport_sockets::tls::v3::Secret getCurrentServerPrivateKeyProviderSecret() { + envoy::extensions::transport_sockets::tls::v3::Secret secret; + const std::string yaml = + R"EOF( +name: "abc.com" +tls_certificate: + certificate_chain: + filename: "{{ test_tmpdir }}/root/current/servercert.pem" + private_key_provider: + provider_name: test + typed_config: + "@type": "type.googleapis.com/google.protobuf.Struct" + value: + private_key_file: "{{ test_tmpdir }}/root/current/serverkey.pem" + expected_operation: "sign" + sync_mode: true + mode: "rsa" +)EOF"; + + TestUtility::loadFromYaml(TestEnvironment::substitute(yaml), secret); + secret.set_name(client_cert_); + + return secret; + } +}; + +INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, SdsCdsPrivateKeyIntegrationTest, + testing::ValuesIn(getSdsTestsParams(true)), sdsTestParamsToString); + +// Test private key providers in SDS+CDS setup. 
+TEST_P(SdsCdsPrivateKeyIntegrationTest, BasicSdsCdsPrivateKeyProvider) { + v3_resource_api_ = true; + + TestEnvironment::exec( + {TestEnvironment::runfilesPath("test/integration/sds_dynamic_key_rotation_setup.sh")}); + + // Set up the private key provider. + Extensions::PrivateKeyMethodProvider::TestPrivateKeyMethodFactory test_factory; + Registry::InjectFactory + test_private_key_method_factory(test_factory); + + on_server_init_function_ = [this]() { + { + // CDS. + AssertionResult result = + fake_upstreams_[1]->waitForHttpConnection(*dispatcher_, xds_connection_); + EXPECT_TRUE(result); + result = xds_connection_->waitForNewStream(*dispatcher_, xds_stream_); + EXPECT_TRUE(result); + xds_stream_->startGrpcStream(); + sendCdsResponse(); + } + { + // SDS. + AssertionResult result = + fake_upstreams_[2]->waitForHttpConnection(*dispatcher_, sds_connection_); + EXPECT_TRUE(result); + + result = sds_connection_->waitForNewStream(*dispatcher_, sds_stream_); + EXPECT_TRUE(result); + sds_stream_->startGrpcStream(); + sendSdsResponse2(getCurrentServerPrivateKeyProviderSecret(), *sds_stream_); + } + }; + initialize(); + + test_server_->waitForCounterGe( + "cluster.dynamic.client_ssl_socket_factory.ssl_context_update_by_sds", 1); + // The 4 clusters are CDS,SDS,static and dynamic cluster. + test_server_->waitForGaugeGe("cluster_manager.active_clusters", 4); + + sendDiscoveryResponse(Config::TypeUrl::get().Cluster, {}, {}, + {}, "42"); + // Successfully removed the dynamic cluster. 
+ test_server_->waitForGaugeEq("cluster_manager.active_clusters", 3); +} } // namespace Ssl } // namespace Envoy diff --git a/test/integration/sds_generic_secret_integration_test.cc b/test/integration/sds_generic_secret_integration_test.cc index 581870cab2c74..5d9c64c921b2b 100644 --- a/test/integration/sds_generic_secret_integration_test.cc +++ b/test/integration/sds_generic_secret_integration_test.cc @@ -9,6 +9,7 @@ #include "source/common/config/datasource.h" #include "source/common/grpc/common.h" +#include "test/config/v2_link_hacks.h" #include "test/extensions/filters/http/common/empty_http_filter_config.h" #include "test/integration/http_integration.h" #include "test/integration/utility.h" diff --git a/test/integration/server.cc b/test/integration/server.cc index 2325ccc592b6f..451c951d20e3a 100644 --- a/test/integration/server.cc +++ b/test/integration/server.cc @@ -31,7 +31,7 @@ OptionsImpl createTestOptionsImpl(const std::string& config_path, const std::str Network::Address::IpVersion ip_version, FieldValidationConfig validation_config, uint32_t concurrency, std::chrono::seconds drain_time, - Server::DrainStrategy drain_strategy, bool v2_bootstrap) { + Server::DrainStrategy drain_strategy) { OptionsImpl test_options("cluster_name", "node_name", "zone_name", spdlog::level::info); test_options.setConfigPath(config_path); @@ -46,9 +46,6 @@ OptionsImpl createTestOptionsImpl(const std::string& config_path, const std::str test_options.setIgnoreUnknownFieldsDynamic(validation_config.ignore_unknown_dynamic_fields); test_options.setConcurrency(concurrency); test_options.setHotRestartDisabled(true); - if (v2_bootstrap) { - test_options.setBootstrapVersion(2); - } return test_options; } @@ -62,7 +59,7 @@ IntegrationTestServerPtr IntegrationTestServer::create( Event::TestTimeSystem& time_system, Api::Api& api, bool defer_listener_finalization, ProcessObjectOptRef process_object, Server::FieldValidationConfig validation_config, uint32_t concurrency, 
std::chrono::seconds drain_time, Server::DrainStrategy drain_strategy, - Buffer::WatermarkFactorySharedPtr watermark_factory, bool use_real_stats, bool v2_bootstrap) { + Buffer::WatermarkFactorySharedPtr watermark_factory, bool use_real_stats) { IntegrationTestServerPtr server{ std::make_unique(time_system, api, config_path, use_real_stats)}; if (server_ready_function != nullptr) { @@ -70,7 +67,7 @@ IntegrationTestServerPtr IntegrationTestServer::create( } server->start(version, on_server_init_function, deterministic, defer_listener_finalization, process_object, validation_config, concurrency, drain_time, drain_strategy, - watermark_factory, v2_bootstrap); + watermark_factory); return server; } @@ -98,20 +95,22 @@ void IntegrationTestServer::unsetDynamicContextParam(absl::string_view resource_ }); } -void IntegrationTestServer::start( - const Network::Address::IpVersion version, std::function on_server_init_function, - bool deterministic, bool defer_listener_finalization, ProcessObjectOptRef process_object, - Server::FieldValidationConfig validator_config, uint32_t concurrency, - std::chrono::seconds drain_time, Server::DrainStrategy drain_strategy, - Buffer::WatermarkFactorySharedPtr watermark_factory, bool v2_bootstrap) { +void IntegrationTestServer::start(const Network::Address::IpVersion version, + std::function on_server_init_function, bool deterministic, + bool defer_listener_finalization, + ProcessObjectOptRef process_object, + Server::FieldValidationConfig validator_config, + uint32_t concurrency, std::chrono::seconds drain_time, + Server::DrainStrategy drain_strategy, + Buffer::WatermarkFactorySharedPtr watermark_factory) { ENVOY_LOG(info, "starting integration test server"); ASSERT(!thread_); - thread_ = api_.threadFactory().createThread( - [version, deterministic, process_object, validator_config, concurrency, drain_time, - drain_strategy, watermark_factory, v2_bootstrap, this]() -> void { - threadRoutine(version, deterministic, process_object, 
validator_config, concurrency, - drain_time, drain_strategy, watermark_factory, v2_bootstrap); - }); + thread_ = api_.threadFactory().createThread([version, deterministic, process_object, + validator_config, concurrency, drain_time, + drain_strategy, watermark_factory, this]() -> void { + threadRoutine(version, deterministic, process_object, validator_config, concurrency, drain_time, + drain_strategy, watermark_factory); + }); // If any steps need to be done prior to workers starting, do them now. E.g., xDS pre-init. // Note that there is no synchronization guaranteeing this happens either @@ -188,11 +187,9 @@ void IntegrationTestServer::threadRoutine(const Network::Address::IpVersion vers Server::FieldValidationConfig validation_config, uint32_t concurrency, std::chrono::seconds drain_time, Server::DrainStrategy drain_strategy, - Buffer::WatermarkFactorySharedPtr watermark_factory, - bool v2_bootstrap) { + Buffer::WatermarkFactorySharedPtr watermark_factory) { OptionsImpl options(Server::createTestOptionsImpl(config_path_, "", version, validation_config, - concurrency, drain_time, drain_strategy, - v2_bootstrap)); + concurrency, drain_time, drain_strategy)); Thread::MutexBasicLockable lock; Random::RandomGeneratorPtr random_generator; @@ -238,7 +235,7 @@ void IntegrationTestServerImpl::createAndRunEnvoyServer( // This is technically thread unsafe (assigning to a shared_ptr accessed // across threads), but because we synchronize below through serverReady(), the only // consumer on the main test thread in ~IntegrationTestServerImpl will not race. 
- admin_address_ = server.admin().socket().addressProvider().localAddress(); + admin_address_ = server.admin().socket().connectionInfoProvider().localAddress(); server_ = &server; stat_store_ = &stat_store; serverReady(); diff --git a/test/integration/server.h b/test/integration/server.h index 7e5ddd9fa93dc..da51efe2eabf7 100644 --- a/test/integration/server.h +++ b/test/integration/server.h @@ -45,8 +45,7 @@ createTestOptionsImpl(const std::string& config_path, const std::string& config_ FieldValidationConfig validation_config = FieldValidationConfig(), uint32_t concurrency = 1, std::chrono::seconds drain_time = std::chrono::seconds(1), - Server::DrainStrategy drain_strategy = Server::DrainStrategy::Gradual, - bool v2_bootstrap = false); + Server::DrainStrategy drain_strategy = Server::DrainStrategy::Gradual); class TestComponentFactory : public ComponentFactory { public: @@ -282,6 +281,21 @@ class TestIsolatedStoreImpl : public StoreRoot { Thread::LockGuard lock(lock_); return store_.counterFromStatNameWithTags(name, tags); } + void forEachCounter(std::function f_size, + std::function f_stat) const override { + Thread::LockGuard lock(lock_); + store_.forEachCounter(f_size, f_stat); + } + void forEachGauge(std::function f_size, + std::function f_stat) const override { + Thread::LockGuard lock(lock_); + store_.forEachGauge(f_size, f_stat); + } + void forEachTextReadout(std::function f_size, + std::function f_stat) const override { + Thread::LockGuard lock(lock_); + store_.forEachTextReadout(f_size, f_stat); + } Counter& counterFromString(const std::string& name) override { Thread::LockGuard lock(lock_); return store_.counterFromString(name); @@ -398,18 +412,16 @@ class IntegrationTestServer : public Logger::Loggable, public IntegrationTestServerStats, public Server::ComponentFactory { public: - static IntegrationTestServerPtr - create(const std::string& config_path, const Network::Address::IpVersion version, - std::function on_server_ready_function, - std::function 
on_server_init_function, bool deterministic, - Event::TestTimeSystem& time_system, Api::Api& api, - bool defer_listener_finalization = false, - ProcessObjectOptRef process_object = absl::nullopt, - Server::FieldValidationConfig validation_config = Server::FieldValidationConfig(), - uint32_t concurrency = 1, std::chrono::seconds drain_time = std::chrono::seconds(1), - Server::DrainStrategy drain_strategy = Server::DrainStrategy::Gradual, - Buffer::WatermarkFactorySharedPtr watermark_factory = nullptr, bool use_real_stats = false, - bool v2_bootstrap = false); + static IntegrationTestServerPtr create( + const std::string& config_path, const Network::Address::IpVersion version, + std::function on_server_ready_function, + std::function on_server_init_function, bool deterministic, + Event::TestTimeSystem& time_system, Api::Api& api, bool defer_listener_finalization = false, + ProcessObjectOptRef process_object = absl::nullopt, + Server::FieldValidationConfig validation_config = Server::FieldValidationConfig(), + uint32_t concurrency = 1, std::chrono::seconds drain_time = std::chrono::seconds(1), + Server::DrainStrategy drain_strategy = Server::DrainStrategy::Gradual, + Buffer::WatermarkFactorySharedPtr watermark_factory = nullptr, bool use_real_stats = false); // Note that the derived class is responsible for tearing down the server in its // destructor. 
~IntegrationTestServer() override; @@ -437,7 +449,7 @@ class IntegrationTestServer : public Logger::Loggable, bool defer_listener_finalization, ProcessObjectOptRef process_object, Server::FieldValidationConfig validation_config, uint32_t concurrency, std::chrono::seconds drain_time, Server::DrainStrategy drain_strategy, - Buffer::WatermarkFactorySharedPtr watermark_factory, bool v2_bootstrap); + Buffer::WatermarkFactorySharedPtr watermark_factory); void waitForCounterEq(const std::string& name, uint64_t value, std::chrono::milliseconds timeout = TestUtility::DefaultTimeout, @@ -465,6 +477,11 @@ class IntegrationTestServer : public Logger::Loggable, notifyingStatsAllocator().waitForCounterExists(name); } + // TODO(#17956): Add Gauge type to NotifyingAllocator and adopt it in this method. + void waitForGaugeDestroyed(const std::string& name) override { + ASSERT_TRUE(TestUtility::waitForGaugeDestroyed(statStore(), name, time_system_)); + } + void waitUntilHistogramHasSamples( const std::string& name, std::chrono::milliseconds timeout = std::chrono::milliseconds::zero()) override { @@ -545,7 +562,7 @@ class IntegrationTestServer : public Logger::Loggable, ProcessObjectOptRef process_object, Server::FieldValidationConfig validation_config, uint32_t concurrency, std::chrono::seconds drain_time, Server::DrainStrategy drain_strategy, - Buffer::WatermarkFactorySharedPtr watermark_factory, bool v2_bootstrap); + Buffer::WatermarkFactorySharedPtr watermark_factory); Event::TestTimeSystem& time_system_; Api::Api& api_; diff --git a/test/integration/server_stats.h b/test/integration/server_stats.h index 66cb7e07e7d27..d4520d3456db5 100644 --- a/test/integration/server_stats.h +++ b/test/integration/server_stats.h @@ -66,6 +66,12 @@ class IntegrationTestServerStats { waitForGaugeEq(const std::string& name, uint64_t value, std::chrono::milliseconds timeout = std::chrono::milliseconds::zero()) PURE; + /** + * Wait for a gauge to be destroyed. 
Note that MockStatStore does not destroy stat. + * @param name gauge name. + */ + virtual void waitForGaugeDestroyed(const std::string& name) PURE; + /** * Counter lookup. This is not thread safe, since we don't get a consistent * snapshot, uses counters() instead for this behavior. diff --git a/test/integration/stats_integration_test.cc b/test/integration/stats_integration_test.cc index 43ad25d1096a1..ffe0dcbc742c3 100644 --- a/test/integration/stats_integration_test.cc +++ b/test/integration/stats_integration_test.cc @@ -268,6 +268,8 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSize) { // 2020/08/11 12202 37061 38500 router: add new retry back-off strategy // 2020/09/11 12973 38993 upstream: predictive preconnect // 2020/10/02 13251 39326 switch to google tcmalloc + // 2021/08/15 17290 40349 add all host map to priority set for fast host + // searching // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI // 'release' builds, where we control the platform and tool-chain. So you @@ -288,7 +290,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSize) { // https://github.com/envoyproxy/envoy/issues/12209 // EXPECT_MEMORY_EQ(m_per_cluster, 37061); } - EXPECT_MEMORY_LE(m_per_cluster, 40000); // Round up to allow platform variations. + EXPECT_MEMORY_LE(m_per_cluster, 40350); // Round up to allow platform variations. } TEST_P(ClusterMemoryTestRunner, MemoryLargeHostSizeWithStats) { diff --git a/test/integration/tcp_proxy_integration_test.cc b/test/integration/tcp_proxy_integration_test.cc index 8062772d73e39..691bcd8df6118 100644 --- a/test/integration/tcp_proxy_integration_test.cc +++ b/test/integration/tcp_proxy_integration_test.cc @@ -867,27 +867,6 @@ TEST_P(TcpProxyMetadataMatchIntegrationTest, expectEndpointToMatchRoute(); } -// Test subset load balancing for a deprecated_v1 route when endpoint selector is defined at the top -// level. 
-TEST_P(TcpProxyMetadataMatchIntegrationTest, - DEPRECATED_FEATURE_TEST(EndpointShouldMatchRouteWithTopLevelMetadataMatch)) { - tcp_proxy_.set_stat_prefix("tcp_stats"); - tcp_proxy_.set_cluster("fallback"); - tcp_proxy_.mutable_hidden_envoy_deprecated_deprecated_v1()->add_routes()->set_cluster( - "cluster_0"); - tcp_proxy_.mutable_metadata_match()->MergeFrom( - lbMetadata({{"role", "primary"}, {"version", "v1"}, {"stage", "prod"}})); - - endpoint_metadata_ = lbMetadata({{"role", "primary"}, {"version", "v1"}, {"stage", "prod"}}); - - config_helper_.addRuntimeOverride("envoy.deprecated_features:envoy.extensions.filters.network." - "tcp_proxy.v3.TcpProxy.hidden_envoy_deprecated_deprecated_v1", - "true"); - initialize(); - - expectEndpointToMatchRoute(); -} - // Test subset load balancing for a weighted cluster when endpoint selector is defined on a weighted // cluster. TEST_P(TcpProxyMetadataMatchIntegrationTest, EndpointShouldMatchWeightedClusterWithMetadataMatch) { @@ -958,27 +937,6 @@ TEST_P(TcpProxyMetadataMatchIntegrationTest, expectEndpointNotToMatchRoute(); } -// Test subset load balancing for a deprecated_v1 route when endpoint selector is defined at the top -// level. -TEST_P(TcpProxyMetadataMatchIntegrationTest, - DEPRECATED_FEATURE_TEST(EndpointShouldNotMatchRouteWithTopLevelMetadataMatch)) { - tcp_proxy_.set_stat_prefix("tcp_stats"); - tcp_proxy_.set_cluster("fallback"); - tcp_proxy_.mutable_hidden_envoy_deprecated_deprecated_v1()->add_routes()->set_cluster( - "cluster_0"); - tcp_proxy_.mutable_metadata_match()->MergeFrom( - lbMetadata({{"role", "primary"}, {"version", "v1"}, {"stage", "prod"}})); - - endpoint_metadata_ = lbMetadata({{"role", "replica"}, {"version", "v1"}, {"stage", "prod"}}); - - config_helper_.addRuntimeOverride("envoy.deprecated_features:envoy.extensions.filters.network." 
- "tcp_proxy.v3.TcpProxy.hidden_envoy_deprecated_deprecated_v1", - "true"); - initialize(); - - expectEndpointNotToMatchRoute(); -} - // Test subset load balancing for a weighted cluster when endpoint selector is defined on a weighted // cluster. TEST_P(TcpProxyMetadataMatchIntegrationTest, diff --git a/test/integration/tracked_watermark_buffer.cc b/test/integration/tracked_watermark_buffer.cc index 1a14d888a1d52..664036e279235 100644 --- a/test/integration/tracked_watermark_buffer.cc +++ b/test/integration/tracked_watermark_buffer.cc @@ -1,5 +1,6 @@ #include "test/integration/tracked_watermark_buffer.h" +#include "envoy/config/overload/v3/overload.pb.h" #include "envoy/thread/thread.h" #include "envoy/thread_local/thread_local.h" #include "envoy/thread_local/thread_local_object.h" @@ -9,6 +10,18 @@ namespace Envoy { namespace Buffer { +TrackedWatermarkBufferFactory::TrackedWatermarkBufferFactory() : TrackedWatermarkBufferFactory(0) {} + +TrackedWatermarkBufferFactory::TrackedWatermarkBufferFactory( + uint32_t minimum_account_to_track_power_of_two) + : WatermarkBufferFactory([minimum_account_to_track_power_of_two]() { + auto config = envoy::config::overload::v3::BufferFactoryConfig(); + if (minimum_account_to_track_power_of_two > 0) { + config.set_minimum_account_to_track_power_of_two(minimum_account_to_track_power_of_two); + } + return config; + }()) {} + TrackedWatermarkBufferFactory::~TrackedWatermarkBufferFactory() { ASSERT(active_buffer_count_ == 0); } @@ -22,6 +35,7 @@ TrackedWatermarkBufferFactory::createBuffer(std::function below_low_wate ++active_buffer_count_; BufferInfo& buffer_info = buffer_infos_[idx]; return std::make_unique( + // update_size [this, &buffer_info](uint64_t current_size) { absl::MutexLock lock(&mutex_); total_buffer_size_ = total_buffer_size_ + current_size - buffer_info.current_size_; @@ -32,10 +46,12 @@ TrackedWatermarkBufferFactory::createBuffer(std::function below_low_wate checkIfExpectedBalancesMet(); }, + // update_high_watermark 
[this, &buffer_info](uint32_t watermark) { absl::MutexLock lock(&mutex_); buffer_info.watermark_ = watermark; }, + // on_delete [this, &buffer_info](TrackedWatermarkBuffer* buffer) { absl::MutexLock lock(&mutex_); ASSERT(active_buffer_count_ > 0); @@ -63,6 +79,7 @@ TrackedWatermarkBufferFactory::createBuffer(std::function below_low_wate } } }, + // on_bind [this](BufferMemoryAccountSharedPtr& account, TrackedWatermarkBuffer* buffer) { absl::MutexLock lock(&mutex_); // Only track non-null accounts. @@ -74,6 +91,23 @@ TrackedWatermarkBufferFactory::createBuffer(std::function below_low_wate below_low_watermark, above_high_watermark, above_overflow_watermark); } +BufferMemoryAccountSharedPtr +TrackedWatermarkBufferFactory::createAccount(Http::StreamResetHandler& reset_handler) { + auto account = WatermarkBufferFactory::createAccount(reset_handler); + if (account != nullptr) { + absl::MutexLock lock(&mutex_); + ++total_accounts_created_; + } + return account; +} + +void TrackedWatermarkBufferFactory::unregisterAccount(const BufferMemoryAccountSharedPtr& account, + absl::optional current_class) { + WatermarkBufferFactory::unregisterAccount(account, current_class); + absl::MutexLock lock(&mutex_); + ++total_accounts_unregistered_; +} + uint64_t TrackedWatermarkBufferFactory::numBuffersCreated() const { absl::MutexLock lock(&mutex_); return buffer_infos_.size(); @@ -139,6 +173,21 @@ std::pair TrackedWatermarkBufferFactory::highWatermarkRange( return std::make_pair(min_watermark, max_watermark); } +uint64_t TrackedWatermarkBufferFactory::numAccountsCreated() const { + absl::MutexLock lock(&mutex_); + return total_accounts_created_; +} + +bool TrackedWatermarkBufferFactory::waitForExpectedAccountUnregistered( + uint64_t expected_accounts_unregistered, std::chrono::milliseconds timeout) { + absl::MutexLock lock(&mutex_); + auto predicate = [this, expected_accounts_unregistered]() ABSL_SHARED_LOCKS_REQUIRED(mutex_) { + mutex_.AssertHeld(); + return 
expected_accounts_unregistered == total_accounts_unregistered_; + }; + return mutex_.AwaitWithTimeout(absl::Condition(&predicate), absl::Milliseconds(timeout.count())); +} + bool TrackedWatermarkBufferFactory::waitUntilTotalBufferedExceeds( uint64_t byte_size, std::chrono::milliseconds timeout) { absl::MutexLock lock(&mutex_); @@ -196,6 +245,11 @@ void TrackedWatermarkBufferFactory::inspectAccounts( done_notification.WaitForNotification(); } +void TrackedWatermarkBufferFactory::inspectMemoryClasses( + std::function func) { + func(size_class_account_sets_); +} + void TrackedWatermarkBufferFactory::setExpectedAccountBalance(uint64_t byte_size_per_account, uint32_t num_accounts) { absl::MutexLock lock(&mutex_); @@ -231,7 +285,7 @@ void TrackedWatermarkBufferFactory::checkIfExpectedBalancesMet() { // This is thread safe since this function should run on the only Envoy worker // thread. for (auto& acc : account_infos_) { - if (static_cast(acc.first.get())->balance() < + if (static_cast(acc.first.get())->balance() < expected_balances_->balance_per_account_) { return; } diff --git a/test/integration/tracked_watermark_buffer.h b/test/integration/tracked_watermark_buffer.h index 999e955037fc6..3e70045dbaa3a 100644 --- a/test/integration/tracked_watermark_buffer.h +++ b/test/integration/tracked_watermark_buffer.h @@ -59,14 +59,19 @@ class TrackedWatermarkBuffer : public Buffer::WatermarkBuffer { }; // Factory that tracks how the created buffers are used. -class TrackedWatermarkBufferFactory : public Buffer::WatermarkFactory { +class TrackedWatermarkBufferFactory : public WatermarkBufferFactory { public: - TrackedWatermarkBufferFactory() = default; + // Use the default minimum tracking threshold. 
+ TrackedWatermarkBufferFactory(); + TrackedWatermarkBufferFactory(uint32_t min_tracking_bytes); ~TrackedWatermarkBufferFactory() override; // Buffer::WatermarkFactory Buffer::InstancePtr createBuffer(std::function below_low_watermark, std::function above_high_watermark, std::function above_overflow_watermark) override; + BufferMemoryAccountSharedPtr createAccount(Http::StreamResetHandler& reset_handler) override; + void unregisterAccount(const BufferMemoryAccountSharedPtr& account, + absl::optional current_class) override; // Number of buffers created. uint64_t numBuffersCreated() const; @@ -82,6 +87,17 @@ class TrackedWatermarkBufferFactory : public Buffer::WatermarkFactory { // functionality is disabled. std::pair highWatermarkRange() const; + // Number of accounts created. + uint64_t numAccountsCreated() const; + + // Waits for the expected number of accounts unregistered. Unlike + // numAccountsCreated, there are no pre-existing hooks into Envoy when an + // account unregistered call occurs as it depends upon deferred delete. + // This creates the synchronization needed. + bool waitForExpectedAccountUnregistered( + uint64_t expected_accounts_unregistered, + std::chrono::milliseconds timeout = TestUtility::DefaultTimeout); + // Total bytes currently buffered across all known buffers. uint64_t totalBytesBuffered() const { absl::MutexLock lock(&mutex_); @@ -116,9 +132,17 @@ class TrackedWatermarkBufferFactory : public Buffer::WatermarkFactory { using AccountToBoundBuffersMap = absl::flat_hash_map>; + // Used to inspect all accounts tied to any buffer created from this factory. void inspectAccounts(std::function func, Server::Instance& server); + // Used to inspect the memory class to accounts within that class structure. + // This differs from inspectAccounts as that has all accounts bounded to an + // active buffer, while this might not track certain accounts (e.g. below + // thresholds.) As implemented this is NOT thread-safe! 
+ void inspectMemoryClasses( + std::function func); + private: // Remove "dangling" accounts; accounts where the account_info map is the only // entity still pointing to the account. @@ -148,6 +172,10 @@ class TrackedWatermarkBufferFactory : public Buffer::WatermarkFactory { uint64_t active_buffer_count_ ABSL_GUARDED_BY(mutex_) = 0; // total bytes buffered across all buffers. uint64_t total_buffer_size_ ABSL_GUARDED_BY(mutex_) = 0; + // total number of accounts created + uint64_t total_accounts_created_ ABSL_GUARDED_BY(mutex_) = 0; + // total number of accounts unregistered + uint64_t total_accounts_unregistered_ ABSL_GUARDED_BY(mutex_) = 0; // Info about the buffer, by buffer idx. absl::node_hash_map buffer_infos_ ABSL_GUARDED_BY(mutex_); // The expected balances for the accounts. If set, when a buffer updates its diff --git a/test/integration/tracked_watermark_buffer_test.cc b/test/integration/tracked_watermark_buffer_test.cc index 734fa5cd31aee..1f4b3569a098c 100644 --- a/test/integration/tracked_watermark_buffer_test.cc +++ b/test/integration/tracked_watermark_buffer_test.cc @@ -7,6 +7,7 @@ #include "test/integration/tracked_watermark_buffer.h" #include "test/mocks/common.h" +#include "test/mocks/http/stream_reset_handler.h" #include "test/test_common/test_runtime.h" #include "test/test_common/thread_factory_for_test.h" @@ -21,7 +22,8 @@ namespace { class TrackedWatermarkBufferTest : public testing::Test { public: - TrackedWatermarkBufferFactory factory_; + TrackedWatermarkBufferFactory factory_{absl::bit_width(4096u)}; + Http::MockStreamResetHandler mock_reset_handler_; }; TEST_F(TrackedWatermarkBufferTest, WatermarkFunctions) { @@ -131,7 +133,7 @@ TEST_F(TrackedWatermarkBufferTest, TracksNumberOfBuffersActivelyBound) { auto buffer1 = factory_.createBuffer([]() {}, []() {}, []() {}); auto buffer2 = factory_.createBuffer([]() {}, []() {}, []() {}); auto buffer3 = factory_.createBuffer([]() {}, []() {}, []() {}); - BufferMemoryAccountSharedPtr account = 
std::make_shared(); + auto account = factory_.createAccount(mock_reset_handler_); ASSERT_TRUE(factory_.waitUntilExpectedNumberOfAccountsAndBoundBuffers(0, 0)); buffer1->bindAccount(account); @@ -141,7 +143,8 @@ TEST_F(TrackedWatermarkBufferTest, TracksNumberOfBuffersActivelyBound) { buffer3->bindAccount(account); EXPECT_TRUE(factory_.waitUntilExpectedNumberOfAccountsAndBoundBuffers(1, 3)); - // Release test access to the account. + // Release test and account access to shared_this. + account->clearDownstream(); account.reset(); buffer3.reset(); @@ -156,7 +159,7 @@ TEST_F(TrackedWatermarkBufferTest, TracksNumberOfAccountsActive) { auto buffer1 = factory_.createBuffer([]() {}, []() {}, []() {}); auto buffer2 = factory_.createBuffer([]() {}, []() {}, []() {}); auto buffer3 = factory_.createBuffer([]() {}, []() {}, []() {}); - BufferMemoryAccountSharedPtr account1 = std::make_shared(); + auto account1 = factory_.createAccount(mock_reset_handler_); ASSERT_TRUE(factory_.waitUntilExpectedNumberOfAccountsAndBoundBuffers(0, 0)); buffer1->bindAccount(account1); @@ -164,10 +167,12 @@ TEST_F(TrackedWatermarkBufferTest, TracksNumberOfAccountsActive) { buffer2->bindAccount(account1); EXPECT_TRUE(factory_.waitUntilExpectedNumberOfAccountsAndBoundBuffers(1, 2)); - // Release test access to the account. + // Release test and account access to shared_this. + account1->clearDownstream(); account1.reset(); - buffer3->bindAccount(std::make_shared()); + auto account2 = factory_.createAccount(mock_reset_handler_); + buffer3->bindAccount(account2); EXPECT_TRUE(factory_.waitUntilExpectedNumberOfAccountsAndBoundBuffers(2, 3)); buffer2.reset(); @@ -175,6 +180,10 @@ TEST_F(TrackedWatermarkBufferTest, TracksNumberOfAccountsActive) { buffer1.reset(); EXPECT_TRUE(factory_.waitUntilExpectedNumberOfAccountsAndBoundBuffers(1, 1)); + // Release test and account access to shared_this. 
+ account2->clearDownstream(); + account2.reset(); + buffer3.reset(); EXPECT_TRUE(factory_.waitUntilExpectedNumberOfAccountsAndBoundBuffers(0, 0)); } @@ -182,8 +191,8 @@ TEST_F(TrackedWatermarkBufferTest, TracksNumberOfAccountsActive) { TEST_F(TrackedWatermarkBufferTest, WaitForExpectedAccountBalanceShouldReturnTrueWhenConditionsMet) { auto buffer1 = factory_.createBuffer([]() {}, []() {}, []() {}); auto buffer2 = factory_.createBuffer([]() {}, []() {}, []() {}); - BufferMemoryAccountSharedPtr account1 = std::make_shared(); - BufferMemoryAccountSharedPtr account2 = std::make_shared(); + auto account1 = factory_.createAccount(mock_reset_handler_); + auto account2 = factory_.createAccount(mock_reset_handler_); buffer1->bindAccount(account1); buffer2->bindAccount(account2); @@ -194,6 +203,9 @@ TEST_F(TrackedWatermarkBufferTest, WaitForExpectedAccountBalanceShouldReturnTrue buffer2->add("Now we have expected balances!"); EXPECT_TRUE(factory_.waitForExpectedAccountBalanceWithTimeout(std::chrono::seconds(0))); + + account1->clearDownstream(); + account2->clearDownstream(); } } // namespace diff --git a/test/integration/utility.cc b/test/integration/utility.cc index c6746cae7869d..a6eb50a495aab 100644 --- a/test/integration/utility.cc +++ b/test/integration/utility.cc @@ -5,6 +5,7 @@ #include #include +#include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/event/dispatcher.h" #include "envoy/extensions/transport_sockets/quic/v3/quic_transport.pb.h" #include "envoy/network/connection.h" @@ -187,8 +188,9 @@ IntegrationUtil::makeSingleRequest(const Network::Address::InstanceConstSharedPt NiceMock random; Event::GlobalTimeSystem time_system; NiceMock random_generator; + envoy::config::bootstrap::v3::Bootstrap bootstrap; Api::Impl api(Thread::threadFactoryForTest(), mock_stats_store, time_system, - Filesystem::fileSystemForTest(), random_generator); + Filesystem::fileSystemForTest(), random_generator, bootstrap); Event::DispatcherPtr 
dispatcher(api.allocateDispatcher("test_thread")); TestConnectionCallbacks connection_callbacks(*dispatcher); diff --git a/test/integration/version_integration_test.cc b/test/integration/version_integration_test.cc index 2f7a5aa2e5e00..2ee09525e7519 100644 --- a/test/integration/version_integration_test.cc +++ b/test/integration/version_integration_test.cc @@ -24,21 +24,6 @@ const char ExampleIpTaggingConfig[] = R"EOF( - {address_prefix: 1.2.3.4, prefix_len: 32} )EOF"; -// envoy.filters.http.ip_tagging from v2 Struct config. -TEST_P(VersionIntegrationTest, DEPRECATED_FEATURE_TEST(IpTaggingV2StaticStructConfig)) { - config_helper_.addFilter(absl::StrCat(R"EOF( - name: envoy.filters.http.ip_tagging - hidden_envoy_deprecated_config: - )EOF", - ExampleIpTaggingConfig)); - - config_helper_.addRuntimeOverride( - "envoy.deprecated_features:envoy.extensions.filters.network." - "http_connection_manager.v3.HttpFilter.hidden_envoy_deprecated_config", - "true"); - initialize(); -} - // envoy.filters.http.ip_tagging from v3 TypedStruct config. 
TEST_P(VersionIntegrationTest, IpTaggingV3StaticTypedStructConfig) { config_helper_.addFilter(absl::StrCat(R"EOF( diff --git a/test/integration/vhds_integration_test.cc b/test/integration/vhds_integration_test.cc index 3af21e7945fea..8edd51e96e8b0 100644 --- a/test/integration/vhds_integration_test.cc +++ b/test/integration/vhds_integration_test.cc @@ -8,6 +8,7 @@ #include "source/common/protobuf/utility.h" #include "test/common/grpc/grpc_client_integration.h" +#include "test/config/v2_link_hacks.h" #include "test/integration/http_integration.h" #include "test/integration/utility.h" #include "test/test_common/network_utility.h" diff --git a/test/integration/xds_integration_test.cc b/test/integration/xds_integration_test.cc index fe6a3cca164f4..8e866132782b7 100644 --- a/test/integration/xds_integration_test.cc +++ b/test/integration/xds_integration_test.cc @@ -301,6 +301,9 @@ class LdsInplaceUpdateHttpIntegrationTest std::string tls_inspector_config = ConfigHelper::tlsInspectorFilter(); config_helper_.addListenerFilter(tls_inspector_config); config_helper_.addSslConfig(); + config_helper_.addConfigModifier( + [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { hcm.mutable_stat_prefix()->assign("hcm0"); }); config_helper_.addConfigModifier([this, add_default_filter_chain]( envoy::config::bootstrap::v3::Bootstrap& bootstrap) { if (!use_default_balancer_) { @@ -335,6 +338,7 @@ class LdsInplaceUpdateHttpIntegrationTest ->mutable_routes(0) ->mutable_route() ->set_cluster("cluster_1"); + hcm_config.mutable_stat_prefix()->assign("hcm1"); config_blob->PackFrom(hcm_config); bootstrap.mutable_static_resources()->mutable_clusters()->Add()->MergeFrom( *bootstrap.mutable_static_resources()->mutable_clusters(0)); @@ -381,7 +385,7 @@ class LdsInplaceUpdateHttpIntegrationTest } } - void expectConnenctionServed(std::string alpn = "alpn0") { + void expectConnectionServed(std::string alpn = "alpn0") { auto 
codec_client_after_config_update = createHttpCodec(alpn); expectResponseHeaderConnectionClose(*codec_client_after_config_update, false); codec_client_after_config_update->close(); @@ -395,7 +399,7 @@ class LdsInplaceUpdateHttpIntegrationTest }; // Verify that http response on filter chain 1 and default filter chain have "Connection: close" -// header when these 2 filter chains are deleted during the listener update. +// header when these 2 filter chains are deleted during the listener update. TEST_P(LdsInplaceUpdateHttpIntegrationTest, ReloadConfigDeletingFilterChain) { inplaceInitialize(/*add_default_filter_chain=*/true); @@ -403,12 +407,6 @@ TEST_P(LdsInplaceUpdateHttpIntegrationTest, ReloadConfigDeletingFilterChain) { auto codec_client_0 = createHttpCodec("alpn0"); auto codec_client_default = createHttpCodec("alpndefault"); - Cleanup cleanup([c1 = codec_client_1.get(), c0 = codec_client_0.get(), - c_default = codec_client_default.get()]() { - c1->close(); - c0->close(); - c_default->close(); - }); ConfigHelper new_config_helper( version_, *api_, MessageUtil::getJsonStringFromMessageOrDie(config_helper_.bootstrap())); new_config_helper.addConfigModifier( @@ -422,12 +420,20 @@ TEST_P(LdsInplaceUpdateHttpIntegrationTest, ReloadConfigDeletingFilterChain) { test_server_->waitForCounterGe("listener_manager.listener_in_place_updated", 1); test_server_->waitForGaugeGe("listener_manager.total_filter_chains_draining", 1); + test_server_->waitForGaugeGe("http.hcm0.downstream_cx_active", 1); + test_server_->waitForGaugeGe("http.hcm1.downstream_cx_active", 1); + expectResponseHeaderConnectionClose(*codec_client_1, true); expectResponseHeaderConnectionClose(*codec_client_default, true); test_server_->waitForGaugeGe("listener_manager.total_filter_chains_draining", 0); expectResponseHeaderConnectionClose(*codec_client_0, false); - expectConnenctionServed(); + expectConnectionServed(); + + codec_client_1->close(); + 
test_server_->waitForGaugeDestroyed("http.hcm1.downstream_cx_active"); + codec_client_0->close(); + codec_client_default->close(); } // Verify that http clients of filter chain 0 survives if new listener config adds new filter @@ -438,15 +444,19 @@ TEST_P(LdsInplaceUpdateHttpIntegrationTest, ReloadConfigAddingFilterChain) { auto codec_client_0 = createHttpCodec("alpn0"); Cleanup cleanup0([c0 = codec_client_0.get()]() { c0->close(); }); + test_server_->waitForGaugeGe("http.hcm0.downstream_cx_active", 1); + ConfigHelper new_config_helper( version_, *api_, MessageUtil::getJsonStringFromMessageOrDie(config_helper_.bootstrap())); new_config_helper.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); - listener->mutable_filter_chains()->Add()->MergeFrom(*listener->mutable_filter_chains(1)); + // Note that HCM2 copies the stats prefix from HCM0 + listener->mutable_filter_chains()->Add()->MergeFrom(*listener->mutable_filter_chains(0)); *listener->mutable_filter_chains(2) ->mutable_filter_chain_match() ->mutable_application_protocols(0) = "alpn2"; + auto default_filter_chain = bootstrap.mutable_static_resources()->mutable_listeners(0)->mutable_default_filter_chain(); default_filter_chain->MergeFrom(*listener->mutable_filter_chains(1)); @@ -458,6 +468,9 @@ TEST_P(LdsInplaceUpdateHttpIntegrationTest, ReloadConfigAddingFilterChain) { auto codec_client_2 = createHttpCodec("alpn2"); auto codec_client_default = createHttpCodec("alpndefault"); + // 1 connection from filter chain 0 and 1 connection from filter chain 2. 
+ test_server_->waitForGaugeGe("http.hcm0.downstream_cx_active", 2); + Cleanup cleanup2([c2 = codec_client_2.get(), c_default = codec_client_default.get()]() { c2->close(); c_default->close(); @@ -465,7 +478,7 @@ TEST_P(LdsInplaceUpdateHttpIntegrationTest, ReloadConfigAddingFilterChain) { expectResponseHeaderConnectionClose(*codec_client_2, false); expectResponseHeaderConnectionClose(*codec_client_default, false); expectResponseHeaderConnectionClose(*codec_client_0, false); - expectConnenctionServed(); + expectConnectionServed(); } // Verify that http clients of default filter chain is drained and recreated if the default filter @@ -493,7 +506,7 @@ TEST_P(LdsInplaceUpdateHttpIntegrationTest, ReloadConfigUpdatingDefaultFilterCha Cleanup cleanup2([c_default_v3 = codec_client_default_v3.get()]() { c_default_v3->close(); }); expectResponseHeaderConnectionClose(*codec_client_default, true); expectResponseHeaderConnectionClose(*codec_client_default_v3, false); - expectConnenctionServed(); + expectConnectionServed(); } // Verify that balancer is inherited. Test only default balancer because ExactConnectionBalancer @@ -515,7 +528,7 @@ TEST_P(LdsInplaceUpdateHttpIntegrationTest, OverlappingFilterChainServesNewConne new_config_helper.setLds("1"); test_server_->waitForCounterGe("listener_manager.listener_in_place_updated", 1); expectResponseHeaderConnectionClose(*codec_client_0, false); - expectConnenctionServed(); + expectConnectionServed(); } // Verify default filter chain update is filter chain only update. 
@@ -587,7 +600,7 @@ TEST_P(LdsIntegrationTest, NewListenerWithBadPostListenSocketOption) { [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); listener->mutable_address()->mutable_socket_address()->set_port_value( - addr_socket.second->addressProvider().localAddress()->ip()->port()); + addr_socket.second->connectionInfoProvider().localAddress()->ip()->port()); auto socket_option = listener->add_socket_options(); socket_option->set_state(envoy::config::core::v3::SocketOption::STATE_LISTENING); socket_option->set_level(10000); // Invalid level. diff --git a/test/mocks/BUILD b/test/mocks/BUILD index 0e0967ddfab87..eb4bcfe415d7b 100644 --- a/test/mocks/BUILD +++ b/test/mocks/BUILD @@ -14,6 +14,7 @@ envoy_cc_test_library( hdrs = ["common.h"], deps = [ "//envoy/common:conn_pool_interface", + "//envoy/common:key_value_store_interface", "//envoy/common:random_generator_interface", "//envoy/common:time_interface", "//source/common/common:minimal_logger_lib", diff --git a/test/mocks/api/mocks.cc b/test/mocks/api/mocks.cc index dfd9345ae8063..c3eeed40d0c5c 100644 --- a/test/mocks/api/mocks.cc +++ b/test/mocks/api/mocks.cc @@ -8,6 +8,7 @@ using testing::_; using testing::Invoke; +using testing::ReturnRef; namespace Envoy { namespace Api { @@ -16,6 +17,7 @@ MockApi::MockApi() { ON_CALL(*this, fileSystem()).WillByDefault(ReturnRef(file_system_)); ON_CALL(*this, rootScope()).WillByDefault(ReturnRef(stats_store_)); ON_CALL(*this, randomGenerator()).WillByDefault(ReturnRef(random_)); + ON_CALL(*this, bootstrap()).WillByDefault(ReturnRef(empty_bootstrap_)); } MockApi::~MockApi() = default; diff --git a/test/mocks/api/mocks.h b/test/mocks/api/mocks.h index 94b5db59f99c2..ef6f02c999cbd 100644 --- a/test/mocks/api/mocks.h +++ b/test/mocks/api/mocks.h @@ -5,6 +5,7 @@ #include "envoy/api/api.h" #include "envoy/api/os_sys_calls.h" +#include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include 
"envoy/event/dispatcher.h" #include "envoy/event/timer.h" @@ -47,12 +48,14 @@ class MockApi : public Api { MOCK_METHOD(Thread::ThreadFactory&, threadFactory, ()); MOCK_METHOD(Stats::Scope&, rootScope, ()); MOCK_METHOD(Random::RandomGenerator&, randomGenerator, ()); + MOCK_METHOD(const envoy::config::bootstrap::v3::Bootstrap&, bootstrap, (), (const)); MOCK_METHOD(ProcessContextOptRef, processContext, ()); testing::NiceMock file_system_; Event::GlobalTimeSystem time_system_; testing::NiceMock stats_store_; testing::NiceMock random_; + envoy::config::bootstrap::v3::Bootstrap empty_bootstrap_; }; class MockOsSysCalls : public OsSysCallsImpl { diff --git a/test/mocks/buffer/mocks.h b/test/mocks/buffer/mocks.h index 22a215206939b..20adb026f6e5b 100644 --- a/test/mocks/buffer/mocks.h +++ b/test/mocks/buffer/mocks.h @@ -87,6 +87,9 @@ class MockBufferFactory : public Buffer::WatermarkFactory { MOCK_METHOD(Buffer::Instance*, createBuffer_, (std::function below_low, std::function above_high, std::function above_overflow)); + + MOCK_METHOD(Buffer::BufferMemoryAccountSharedPtr, createAccount, (Http::StreamResetHandler&)); + MOCK_METHOD(uint64_t, resetAccountsGivenPressure, (float)); }; MATCHER_P(BufferEqual, rhs, testing::PrintToString(*rhs)) { diff --git a/test/mocks/common.cc b/test/mocks/common.cc index ea4012729a8c1..fe64936013253 100644 --- a/test/mocks/common.cc +++ b/test/mocks/common.cc @@ -1,5 +1,7 @@ #include "test/mocks/common.h" +using testing::_; +using testing::ByMove; using testing::Return; namespace Envoy { @@ -22,4 +24,9 @@ ReadyWatcher::~ReadyWatcher() = default; MockTimeSystem::MockTimeSystem() = default; MockTimeSystem::~MockTimeSystem() = default; +MockKeyValueStoreFactory::MockKeyValueStoreFactory() { + ON_CALL(*this, createStore(_, _, _, _)) + .WillByDefault(Return(ByMove(std::make_unique()))); +} + } // namespace Envoy diff --git a/test/mocks/common.h b/test/mocks/common.h index d313358e1fb90..b887865fe9bf5 100644 --- a/test/mocks/common.h +++ 
b/test/mocks/common.h @@ -3,6 +3,7 @@ #include #include "envoy/common/conn_pool.h" +#include "envoy/common/key_value_store.h" #include "envoy/common/random_generator.h" #include "envoy/common/scope_tracker.h" #include "envoy/common/time.h" @@ -116,6 +117,27 @@ class MockRandomGenerator : public RandomGenerator { const std::string uuid_{"a121e9e1-feae-4136-9e0e-6fac343d56c9"}; }; + } // namespace Random +class MockKeyValueStore : public KeyValueStore { +public: + MOCK_METHOD(void, addOrUpdate, (absl::string_view, absl::string_view)); + MOCK_METHOD(void, remove, (absl::string_view)); + MOCK_METHOD(absl::optional, get, (absl::string_view)); + MOCK_METHOD(void, flush, ()); + MOCK_METHOD(void, iterate, (ConstIterateCb), (const)); +}; + +class MockKeyValueStoreFactory : public KeyValueStoreFactory { +public: + MockKeyValueStoreFactory(); + MOCK_METHOD(KeyValueStorePtr, createStore, + (const Protobuf::Message&, ProtobufMessage::ValidationVisitor&, Event::Dispatcher&, + Filesystem::Instance&)); + MOCK_METHOD(ProtobufTypes::MessagePtr, createEmptyConfigProto, ()); + std::string category() const override { return "envoy.common.key_value"; } + std::string name() const override { return "mock_key_value_store_factory"; } +}; + } // namespace Envoy diff --git a/test/mocks/config/mocks.h b/test/mocks/config/mocks.h index ae12fa665c6d5..ffcfdc796831b 100644 --- a/test/mocks/config/mocks.h +++ b/test/mocks/config/mocks.h @@ -122,6 +122,8 @@ class MockGrpcMux : public GrpcMux { MOCK_METHOD(void, requestOnDemandUpdate, (const std::string& type_url, const absl::flat_hash_set& add_these_names)); + + MOCK_METHOD(bool, paused, (const std::string& type_url), (const)); }; class MockGrpcStreamCallbacks diff --git a/test/mocks/http/BUILD b/test/mocks/http/BUILD index 51c6c7f2fae1d..063aa41e96586 100644 --- a/test/mocks/http/BUILD +++ b/test/mocks/http/BUILD @@ -108,3 +108,11 @@ envoy_cc_test( "//test/test_common:utility_lib", ], ) + +envoy_cc_mock( + name = "stream_reset_handler_mock", + 
hdrs = ["stream_reset_handler.h"], + deps = [ + "//envoy/http:stream_reset_handler_interface", + ], +) diff --git a/test/mocks/http/stream.cc b/test/mocks/http/stream.cc index 1a3d4e8bcae67..19181d8c26ed6 100644 --- a/test/mocks/http/stream.cc +++ b/test/mocks/http/stream.cc @@ -29,7 +29,11 @@ MockStream::MockStream() { [this](Buffer::BufferMemoryAccountSharedPtr account) -> void { account_ = account; })); } -MockStream::~MockStream() = default; +MockStream::~MockStream() { + if (account_) { + account_->clearDownstream(); + } +} } // namespace Http } // namespace Envoy diff --git a/test/mocks/http/stream_reset_handler.h b/test/mocks/http/stream_reset_handler.h new file mode 100644 index 0000000000000..6469a7b124cc0 --- /dev/null +++ b/test/mocks/http/stream_reset_handler.h @@ -0,0 +1,19 @@ +#pragma once + +#include "envoy/http/stream_reset_handler.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Http { + +class MockStreamResetHandler : public StreamResetHandler { +public: + MockStreamResetHandler() = default; + + // Http::StreamResetHandler + MOCK_METHOD(void, resetStream, (StreamResetReason reason)); +}; + +} // namespace Http +} // namespace Envoy diff --git a/test/mocks/network/BUILD b/test/mocks/network/BUILD index 38cd2003b09d7..3e3757f1ea2f0 100644 --- a/test/mocks/network/BUILD +++ b/test/mocks/network/BUILD @@ -47,6 +47,7 @@ envoy_cc_mock( hdrs = ["mocks.h"], deps = [ ":connection_mocks", + ":io_handle_mocks", ":transport_socket_mocks", "//envoy/buffer:buffer_interface", "//envoy/network:connection_interface", diff --git a/test/mocks/network/connection.cc b/test/mocks/network/connection.cc index adcb9c971dd18..dc22294b3fbeb 100644 --- a/test/mocks/network/connection.cc +++ b/test/mocks/network/connection.cc @@ -54,10 +54,10 @@ void MockConnectionBase::runLowWatermarkCallbacks() { } template static void initializeMockConnection(T& connection) { - ON_CALL(connection, addressProvider()) - 
.WillByDefault(ReturnPointee(connection.stream_info_.downstream_address_provider_)); - ON_CALL(connection, addressProviderSharedPtr()) - .WillByDefault(ReturnPointee(&connection.stream_info_.downstream_address_provider_)); + ON_CALL(connection, connectionInfoProvider()) + .WillByDefault(ReturnPointee(connection.stream_info_.downstream_connection_info_provider_)); + ON_CALL(connection, connectionInfoProviderSharedPtr()) + .WillByDefault(ReturnPointee(&connection.stream_info_.downstream_connection_info_provider_)); ON_CALL(connection, dispatcher()).WillByDefault(ReturnRef(connection.dispatcher_)); ON_CALL(connection, readEnabled()).WillByDefault(ReturnPointee(&connection.read_enabled_)); ON_CALL(connection, addConnectionCallbacks(_)) @@ -81,7 +81,7 @@ template static void initializeMockConnection(T& connection) { connection.raiseEvent(Network::ConnectionEvent::LocalClose); })); ON_CALL(connection, id()).WillByDefault(Return(connection.next_id_)); - connection.stream_info_.downstream_address_provider_->setConnectionID(connection.id_); + connection.stream_info_.downstream_connection_info_provider_->setConnectionID(connection.id_); ON_CALL(connection, state()).WillByDefault(ReturnPointee(&connection.state_)); // The real implementation will move the buffer data into the socket. 
@@ -94,16 +94,16 @@ template static void initializeMockConnection(T& connection) { } MockConnection::MockConnection() { - stream_info_.downstream_address_provider_->setRemoteAddress( + stream_info_.downstream_connection_info_provider_->setRemoteAddress( Utility::resolveUrl("tcp://10.0.0.3:50000")); initializeMockConnection(*this); } MockConnection::~MockConnection() = default; MockServerConnection::MockServerConnection() { - stream_info_.downstream_address_provider_->setRemoteAddress( + stream_info_.downstream_connection_info_provider_->setRemoteAddress( Utility::resolveUrl("tcp://10.0.0.1:443")); - stream_info_.downstream_address_provider_->setLocalAddress( + stream_info_.downstream_connection_info_provider_->setLocalAddress( Utility::resolveUrl("tcp://10.0.0.2:40000")); initializeMockConnection(*this); } @@ -111,9 +111,9 @@ MockServerConnection::MockServerConnection() { MockServerConnection::~MockServerConnection() = default; MockClientConnection::MockClientConnection() { - stream_info_.downstream_address_provider_->setRemoteAddress( + stream_info_.downstream_connection_info_provider_->setRemoteAddress( Utility::resolveUrl("tcp://10.0.0.1:443")); - stream_info_.downstream_address_provider_->setLocalAddress( + stream_info_.downstream_connection_info_provider_->setLocalAddress( Utility::resolveUrl("tcp://10.0.0.2:40000")); initializeMockConnection(*this); } @@ -121,7 +121,7 @@ MockClientConnection::MockClientConnection() { MockClientConnection::~MockClientConnection() = default; MockFilterManagerConnection::MockFilterManagerConnection() { - stream_info_.downstream_address_provider_->setRemoteAddress( + stream_info_.downstream_connection_info_provider_->setRemoteAddress( Utility::resolveUrl("tcp://10.0.0.3:50000")); initializeMockConnection(*this); diff --git a/test/mocks/network/connection.h b/test/mocks/network/connection.h index 3ec78cb5d47b1..949b6e2b09560 100644 --- a/test/mocks/network/connection.h +++ b/test/mocks/network/connection.h @@ -65,8 +65,8 @@ class 
MockConnectionBase { MOCK_METHOD(void, readDisable, (bool disable)); \ MOCK_METHOD(void, detectEarlyCloseWhenReadDisabled, (bool)); \ MOCK_METHOD(bool, readEnabled, (), (const)); \ - MOCK_METHOD(const SocketAddressProvider&, addressProvider, (), (const)); \ - MOCK_METHOD(SocketAddressProviderSharedPtr, addressProviderSharedPtr, (), (const)); \ + MOCK_METHOD(const ConnectionInfoProvider&, connectionInfoProvider, (), (const)); \ + MOCK_METHOD(ConnectionInfoProviderSharedPtr, connectionInfoProviderSharedPtr, (), (const)); \ MOCK_METHOD(absl::optional, \ unixSocketPeerCredentials, (), (const)); \ MOCK_METHOD(void, setConnectionStats, (const ConnectionStats& stats)); \ @@ -102,7 +102,7 @@ class MockServerConnection : public ServerConnection, public MockConnectionBase DEFINE_MOCK_CONNECTION_MOCK_METHODS; // Network::ServerConnection - MOCK_METHOD(void, setTransportSocketConnectTimeout, (std::chrono::milliseconds)); + MOCK_METHOD(void, setTransportSocketConnectTimeout, (std::chrono::milliseconds, Stats::Counter&)); }; /** diff --git a/test/mocks/network/mocks.cc b/test/mocks/network/mocks.cc index f07685c37f246..04958420157bc 100644 --- a/test/mocks/network/mocks.cc +++ b/test/mocks/network/mocks.cc @@ -37,7 +37,7 @@ MockListenerConfig::MockListenerConfig() ON_CALL(*this, filterChainFactory()).WillByDefault(ReturnRef(filter_chain_factory_)); ON_CALL(*this, listenSocketFactory()).WillByDefault(ReturnRef(socket_factory_)); ON_CALL(socket_factory_, localAddress()) - .WillByDefault(ReturnRef(socket_->addressProvider().localAddress())); + .WillByDefault(ReturnRef(socket_->connectionInfoProvider().localAddress())); ON_CALL(socket_factory_, getListenSocket(_)).WillByDefault(Return(socket_)); ON_CALL(*this, listenerScope()).WillByDefault(ReturnRef(scope_)); ON_CALL(*this, name()).WillByDefault(ReturnRef(name_)); @@ -132,8 +132,8 @@ MockFilterChainFactory::MockFilterChainFactory() { MockFilterChainFactory::~MockFilterChainFactory() = default; MockListenSocket::MockListenSocket() - 
: io_handle_(std::make_unique()), - address_provider_(std::make_shared( + : io_handle_(std::make_unique>()), + connection_info_provider_(std::make_shared( std::make_shared(80), nullptr)) { ON_CALL(*this, options()).WillByDefault(ReturnRef(options_)); ON_CALL(*this, ioHandle()).WillByDefault(ReturnRef(*io_handle_)); @@ -143,7 +143,7 @@ MockListenSocket::MockListenSocket() return socket_is_open_; })); ON_CALL(*this, ipVersion()) - .WillByDefault(Return(address_provider_->localAddress()->ip()->version())); + .WillByDefault(Return(connection_info_provider_->localAddress()->ip()->version())); ON_CALL(*this, duplicate()).WillByDefault(Invoke([]() { return std::make_unique>(); })); @@ -157,13 +157,13 @@ MockSocketOption::~MockSocketOption() = default; MockConnectionSocket::MockConnectionSocket() : io_handle_(std::make_unique()), - address_provider_( - std::make_shared(std::make_shared(80), - std::make_shared(80))) { + connection_info_provider_( + std::make_shared(std::make_shared(80), + std::make_shared(80))) { ON_CALL(*this, ioHandle()).WillByDefault(ReturnRef(*io_handle_)); ON_CALL(testing::Const(*this), ioHandle()).WillByDefault(ReturnRef(*io_handle_)); ON_CALL(*this, ipVersion()) - .WillByDefault(Return(address_provider_->localAddress()->ip()->version())); + .WillByDefault(Return(connection_info_provider_->localAddress()->ip()->version())); } MockConnectionSocket::~MockConnectionSocket() = default; diff --git a/test/mocks/network/mocks.h b/test/mocks/network/mocks.h index 1145db10d1460..22ea2a6d1f663 100644 --- a/test/mocks/network/mocks.h +++ b/test/mocks/network/mocks.h @@ -25,6 +25,7 @@ #include "test/mocks/event/mocks.h" #include "test/mocks/network/connection.h" +#include "test/mocks/network/io_handle.h" #include "test/mocks/stream_info/mocks.h" #include "test/test_common/printers.h" @@ -242,10 +243,12 @@ class MockListenSocket : public Socket { void addOption(const Socket::OptionConstSharedPtr& option) override { addOption_(option); } void addOptions(const 
Socket::OptionsSharedPtr& options) override { addOptions_(options); } - SocketAddressSetter& addressProvider() override { return *address_provider_; } - const SocketAddressProvider& addressProvider() const override { return *address_provider_; } - SocketAddressProviderSharedPtr addressProviderSharedPtr() const override { - return address_provider_; + ConnectionInfoSetter& connectionInfoProvider() override { return *connection_info_provider_; } + const ConnectionInfoProvider& connectionInfoProvider() const override { + return *connection_info_provider_; + } + ConnectionInfoProviderSharedPtr connectionInfoProviderSharedPtr() const override { + return connection_info_provider_; } MOCK_METHOD(IoHandle&, ioHandle, ()); MOCK_METHOD(SocketPtr, duplicate, ()); @@ -270,8 +273,8 @@ class MockListenSocket : public Socket { (unsigned long, void*, unsigned long, void*, unsigned long, unsigned long*)); MOCK_METHOD(Api::SysCallIntResult, setBlockingForTest, (bool)); - IoHandlePtr io_handle_; - Network::SocketAddressSetterSharedPtr address_provider_; + std::unique_ptr io_handle_; + Network::ConnectionInfoSetterSharedPtr connection_info_provider_; OptionsSharedPtr options_; bool socket_is_open_ = true; }; @@ -296,10 +299,12 @@ class MockConnectionSocket : public ConnectionSocket { void addOption(const Socket::OptionConstSharedPtr& option) override { addOption_(option); } void addOptions(const Socket::OptionsSharedPtr& options) override { addOptions_(options); } - SocketAddressSetter& addressProvider() override { return *address_provider_; } - const SocketAddressProvider& addressProvider() const override { return *address_provider_; } - SocketAddressProviderSharedPtr addressProviderSharedPtr() const override { - return address_provider_; + ConnectionInfoSetter& connectionInfoProvider() override { return *connection_info_provider_; } + const ConnectionInfoProvider& connectionInfoProvider() const override { + return *connection_info_provider_; + } + ConnectionInfoProviderSharedPtr 
connectionInfoProviderSharedPtr() const override { + return connection_info_provider_; } MOCK_METHOD(void, setDetectedTransportProtocol, (absl::string_view)); MOCK_METHOD(absl::string_view, detectedTransportProtocol, (), (const)); @@ -333,7 +338,7 @@ class MockConnectionSocket : public ConnectionSocket { MOCK_METHOD(void, dumpState, (std::ostream&, int), (const)); IoHandlePtr io_handle_; - std::shared_ptr address_provider_; + std::shared_ptr connection_info_provider_; bool is_closed_; }; diff --git a/test/mocks/network/socket.h b/test/mocks/network/socket.h index b4f3ac4e6b43b..ace4536e81a18 100644 --- a/test/mocks/network/socket.h +++ b/test/mocks/network/socket.h @@ -14,10 +14,12 @@ class MockSocket : public Socket { MockSocket(); ~MockSocket() override; - SocketAddressSetter& addressProvider() override { return *address_provider_; } - const SocketAddressProvider& addressProvider() const override { return *address_provider_; } - SocketAddressProviderSharedPtr addressProviderSharedPtr() const override { - return address_provider_; + ConnectionInfoSetter& connectionInfoProvider() override { return *connection_info_provider_; } + const ConnectionInfoProvider& connectionInfoProvider() const override { + return *connection_info_provider_; + } + ConnectionInfoProviderSharedPtr connectionInfoProviderSharedPtr() const override { + return connection_info_provider_; } IoHandle& ioHandle() override; const IoHandle& ioHandle() const override; @@ -43,7 +45,7 @@ class MockSocket : public Socket { MOCK_METHOD(void, addOptions, (const Socket::OptionsSharedPtr&), (override)); const std::unique_ptr io_handle_; - Network::SocketAddressSetterSharedPtr address_provider_; + Network::ConnectionInfoSetterSharedPtr connection_info_provider_; }; } // namespace Network diff --git a/test/mocks/router/mocks.cc b/test/mocks/router/mocks.cc index 93de670c3a2d7..8ab3d486fba10 100644 --- a/test/mocks/router/mocks.cc +++ b/test/mocks/router/mocks.cc @@ -99,7 +99,6 @@ 
MockRouteEntry::MockRouteEntry() { ON_CALL(*this, virtualHost()).WillByDefault(ReturnRef(virtual_host_)); ON_CALL(*this, includeVirtualHostRateLimits()).WillByDefault(Return(true)); ON_CALL(*this, pathMatchCriterion()).WillByDefault(ReturnRef(path_match_criterion_)); - ON_CALL(*this, metadata()).WillByDefault(ReturnRef(metadata_)); ON_CALL(*this, upgradeMap()).WillByDefault(ReturnRef(upgrade_map_)); ON_CALL(*this, hedgePolicy()).WillByDefault(ReturnRef(hedge_policy_)); ON_CALL(*this, routeName()).WillByDefault(ReturnRef(route_name_)); @@ -131,6 +130,7 @@ MockRoute::MockRoute() { ON_CALL(*this, routeEntry()).WillByDefault(Return(&route_entry_)); ON_CALL(*this, decorator()).WillByDefault(Return(&decorator_)); ON_CALL(*this, tracingConfig()).WillByDefault(Return(nullptr)); + ON_CALL(*this, metadata()).WillByDefault(ReturnRef(metadata_)); } MockRoute::~MockRoute() = default; diff --git a/test/mocks/router/mocks.h b/test/mocks/router/mocks.h index 141c3934886ab..98c1c6710b201 100644 --- a/test/mocks/router/mocks.h +++ b/test/mocks/router/mocks.h @@ -388,10 +388,7 @@ class MockRouteEntry : public RouteEntry { MOCK_METHOD(const CorsPolicy*, corsPolicy, (), (const)); MOCK_METHOD(absl::optional, currentUrlPathAfterRewrite, (const Http::RequestHeaderMap&), (const)); - MOCK_METHOD(const envoy::config::core::v3::Metadata&, metadata, (), (const)); - MOCK_METHOD(const Envoy::Config::TypedMetadata&, typedMetadata, (), (const)); MOCK_METHOD(const PathMatchCriterion&, pathMatchCriterion, (), (const)); - MOCK_METHOD(const RouteSpecificFilterConfig*, perFilterConfig, (const std::string&), (const)); MOCK_METHOD(bool, includeAttemptCountInRequest, (), (const)); MOCK_METHOD(bool, includeAttemptCountInResponse, (), (const)); MOCK_METHOD(const absl::optional&, connectConfig, (), (const)); @@ -413,7 +410,6 @@ class MockRouteEntry : public RouteEntry { MockTlsContextMatchCriteria tls_context_matches_criteria_; TestCorsPolicy cors_policy_; testing::NiceMock path_match_criterion_; - 
envoy::config::core::v3::Metadata metadata_; UpgradeMap upgrade_map_; absl::optional connect_config_; }; @@ -454,10 +450,18 @@ class MockRoute : public Route { MOCK_METHOD(const Decorator*, decorator, (), (const)); MOCK_METHOD(const RouteTracing*, tracingConfig, (), (const)); MOCK_METHOD(const RouteSpecificFilterConfig*, perFilterConfig, (const std::string&), (const)); + MOCK_METHOD(const RouteSpecificFilterConfig*, mostSpecificPerFilterConfig, (const std::string&), + (const)); + MOCK_METHOD(void, traversePerFilterConfig, + (const std::string&, std::function), + (const)); + MOCK_METHOD(const envoy::config::core::v3::Metadata&, metadata, (), (const)); + MOCK_METHOD(const Envoy::Config::TypedMetadata&, typedMetadata, (), (const)); testing::NiceMock route_entry_; testing::NiceMock decorator_; testing::NiceMock route_tracing_; + envoy::config::core::v3::Metadata metadata_; }; class MockConfig : public Config { diff --git a/test/mocks/router/router_filter_interface.h b/test/mocks/router/router_filter_interface.h index 7ca27675a3d53..55aa4d9a42210 100644 --- a/test/mocks/router/router_filter_interface.h +++ b/test/mocks/router/router_filter_interface.h @@ -37,6 +37,7 @@ class MockRouterFilterInterface : public RouterFilterInterface { MOCK_METHOD(Upstream::ClusterInfoConstSharedPtr, cluster, ()); MOCK_METHOD(FilterConfig&, config, ()); MOCK_METHOD(FilterUtility::TimeoutData, timeout, ()); + MOCK_METHOD(absl::optional, dynamicMaxStreamDuration, (), (const)); MOCK_METHOD(Envoy::Http::RequestHeaderMap*, downstreamHeaders, ()); MOCK_METHOD(Envoy::Http::RequestTrailerMap*, downstreamTrailers, ()); MOCK_METHOD(bool, downstreamResponseStarted, (), (const)); diff --git a/test/mocks/server/BUILD b/test/mocks/server/BUILD index c26d734bad79f..669071ba8c8d3 100644 --- a/test/mocks/server/BUILD +++ b/test/mocks/server/BUILD @@ -121,6 +121,7 @@ envoy_cc_mock( deps = [ "//envoy/server:drain_manager_interface", "//envoy/server:listener_manager_interface", + 
"//test/mocks/network:io_handle_mocks", "//test/mocks/network:network_mocks", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", diff --git a/test/mocks/server/factory_context.h b/test/mocks/server/factory_context.h index 41f3c20bf4400..ec7e6f8659ef0 100644 --- a/test/mocks/server/factory_context.h +++ b/test/mocks/server/factory_context.h @@ -38,6 +38,7 @@ class MockFactoryContext : public virtual FactoryContext { MOCK_METHOD(ThreadLocal::Instance&, threadLocal, ()); MOCK_METHOD(Server::Admin&, admin, ()); MOCK_METHOD(Stats::Scope&, listenerScope, ()); + MOCK_METHOD(bool, isQuicListener, (), (const)); MOCK_METHOD(const LocalInfo::LocalInfo&, localInfo, (), (const)); MOCK_METHOD(const envoy::config::core::v3::Metadata&, listenerMetadata, (), (const)); MOCK_METHOD(envoy::config::core::v3::TrafficDirection, direction, (), (const)); diff --git a/test/mocks/server/listener_factory_context.h b/test/mocks/server/listener_factory_context.h index a72237a2eaf4e..095aad5931dcb 100644 --- a/test/mocks/server/listener_factory_context.h +++ b/test/mocks/server/listener_factory_context.h @@ -40,6 +40,7 @@ class MockListenerFactoryContext : public ListenerFactoryContext { MOCK_METHOD(ThreadLocal::Instance&, threadLocal, ()); MOCK_METHOD(Server::Admin&, admin, ()); MOCK_METHOD(Stats::Scope&, listenerScope, ()); + MOCK_METHOD(bool, isQuicListener, (), (const)); MOCK_METHOD(const LocalInfo::LocalInfo&, localInfo, (), (const)); MOCK_METHOD(const envoy::config::core::v3::Metadata&, listenerMetadata, (), (const)); MOCK_METHOD(envoy::config::core::v3::TrafficDirection, direction, (), (const)); diff --git a/test/mocks/server/options.cc b/test/mocks/server/options.cc index 09409e2772c53..8c48e026f81dd 100644 --- a/test/mocks/server/options.cc +++ b/test/mocks/server/options.cc @@ -18,7 +18,6 @@ MockOptions::MockOptions(const std::string& config_path) : config_path_(config_p ON_CALL(*this, 
configPath()).WillByDefault(ReturnRef(config_path_)); ON_CALL(*this, configProto()).WillByDefault(ReturnRef(config_proto_)); ON_CALL(*this, configYaml()).WillByDefault(ReturnRef(config_yaml_)); - ON_CALL(*this, bootstrapVersion()).WillByDefault(ReturnRef(bootstrap_version_)); ON_CALL(*this, allowUnknownStaticFields()).WillByDefault(Invoke([this] { return allow_unknown_static_fields_; })); diff --git a/test/mocks/server/options.h b/test/mocks/server/options.h index 9bc6e721ea68f..35bc8ff5c26aa 100644 --- a/test/mocks/server/options.h +++ b/test/mocks/server/options.h @@ -22,7 +22,6 @@ class MockOptions : public Options { MOCK_METHOD(const std::string&, configPath, (), (const)); MOCK_METHOD(const envoy::config::bootstrap::v3::Bootstrap&, configProto, (), (const)); MOCK_METHOD(const std::string&, configYaml, (), (const)); - MOCK_METHOD(const absl::optional&, bootstrapVersion, (), (const)); MOCK_METHOD(bool, allowUnknownStaticFields, (), (const)); MOCK_METHOD(bool, rejectUnknownDynamicFields, (), (const)); MOCK_METHOD(bool, ignoreUnknownDynamicFields, (), (const)); diff --git a/test/mocks/stats/mocks.h b/test/mocks/stats/mocks.h index 928f3ab9c7671..ba81b5922f306 100644 --- a/test/mocks/stats/mocks.h +++ b/test/mocks/stats/mocks.h @@ -2,6 +2,7 @@ #include #include +#include #include #include @@ -147,14 +148,6 @@ class MockCounter : public MockStatWithRefcount { bool used_; uint64_t value_; uint64_t latch_; - - // RefcountInterface - void incRefCount() override { refcount_helper_.incRefCount(); } - bool decRefCount() override { return refcount_helper_.decRefCount(); } - uint32_t use_count() const override { return refcount_helper_.use_count(); } - -private: - RefcountHelper refcount_helper_; }; class MockGauge : public MockStatWithRefcount { @@ -177,14 +170,6 @@ class MockGauge : public MockStatWithRefcount { bool used_; uint64_t value_; ImportMode import_mode_; - - // RefcountInterface - void incRefCount() override { refcount_helper_.incRefCount(); } - bool 
decRefCount() override { return refcount_helper_.decRefCount(); } - uint32_t use_count() const override { return refcount_helper_.use_count(); } - -private: - RefcountHelper refcount_helper_; }; class MockHistogram : public MockMetric { @@ -301,6 +286,13 @@ class MockStore : public TestUtil::TestStore { MOCK_METHOD(Histogram&, histogramFromString, (const std::string& name, Histogram::Unit unit)); MOCK_METHOD(TextReadout&, textReadout, (const std::string&)); MOCK_METHOD(std::vector, text_readouts, (), (const)); + MOCK_METHOD(void, forEachCounter, + (std::function, std::function), (const)); + MOCK_METHOD(void, forEachGauge, + (std::function, std::function), (const)); + MOCK_METHOD(void, forEachTextReadout, + (std::function, std::function), + (const)); MOCK_METHOD(CounterOptConstRef, findCounter, (StatName), (const)); MOCK_METHOD(GaugeOptConstRef, findGauge, (StatName), (const)); diff --git a/test/mocks/stream_info/mocks.cc b/test/mocks/stream_info/mocks.cc index 28194ab7ab700..49bdf235a5ae6 100644 --- a/test/mocks/stream_info/mocks.cc +++ b/test/mocks/stream_info/mocks.cc @@ -18,7 +18,7 @@ namespace StreamInfo { MockStreamInfo::MockStreamInfo() : start_time_(ts_.systemTime()), filter_state_(std::make_shared(FilterState::LifeSpan::FilterChain)), - downstream_address_provider_(std::make_shared( + downstream_connection_info_provider_(std::make_shared( std::make_shared("127.0.0.2"), std::make_shared("127.0.0.1"))) { ON_CALL(*this, setResponseFlag(_)).WillByDefault(Invoke([this](ResponseFlag response_flag) { @@ -63,16 +63,10 @@ MockStreamInfo::MockStreamInfo() })); ON_CALL(*this, upstreamLocalAddress()).WillByDefault(ReturnRef(upstream_local_address_)); ON_CALL(*this, downstreamAddressProvider()) - .WillByDefault(ReturnPointee(downstream_address_provider_)); - ON_CALL(*this, setDownstreamSslConnection(_)) - .WillByDefault(Invoke( - [this](const auto& connection_info) { downstream_connection_info_ = connection_info; })); + 
.WillByDefault(ReturnPointee(downstream_connection_info_provider_)); ON_CALL(*this, setUpstreamSslConnection(_)) .WillByDefault(Invoke( [this](const auto& connection_info) { upstream_connection_info_ = connection_info; })); - ON_CALL(*this, downstreamSslConnection()).WillByDefault(Invoke([this]() { - return downstream_connection_info_; - })); ON_CALL(*this, upstreamSslConnection()).WillByDefault(Invoke([this]() { return upstream_connection_info_; })); @@ -126,6 +120,16 @@ MockStreamInfo::MockStreamInfo() filter_chain_name_ = std::string(filter_chain_name); })); ON_CALL(*this, filterChainName()).WillByDefault(ReturnRef(filter_chain_name_)); + ON_CALL(*this, setUpstreamConnectionId(_)).WillByDefault(Invoke([this](uint64_t id) { + upstream_connection_id_ = id; + })); + ON_CALL(*this, upstreamConnectionId()).WillByDefault(Invoke([this]() { + return upstream_connection_id_; + })); + ON_CALL(*this, setAttemptCount(_)).WillByDefault(Invoke([this](uint32_t attempt_count) { + attempt_count_ = attempt_count; + })); + ON_CALL(*this, attemptCount()).WillByDefault(Invoke([this]() { return attempt_count_; })); } MockStreamInfo::~MockStreamInfo() = default; diff --git a/test/mocks/stream_info/mocks.h b/test/mocks/stream_info/mocks.h index dbc2fec6c5bd4..2d296f9697fb5 100644 --- a/test/mocks/stream_info/mocks.h +++ b/test/mocks/stream_info/mocks.h @@ -65,12 +65,10 @@ class MockStreamInfo : public StreamInfo { MOCK_METHOD(const Network::Address::InstanceConstSharedPtr&, upstreamLocalAddress, (), (const)); MOCK_METHOD(bool, healthCheck, (), (const)); MOCK_METHOD(void, healthCheck, (bool is_health_check)); - MOCK_METHOD(const Network::SocketAddressProvider&, downstreamAddressProvider, (), (const)); - MOCK_METHOD(void, setDownstreamSslConnection, (const Ssl::ConnectionInfoConstSharedPtr&)); - MOCK_METHOD(Ssl::ConnectionInfoConstSharedPtr, downstreamSslConnection, (), (const)); + MOCK_METHOD(const Network::ConnectionInfoProvider&, downstreamAddressProvider, (), (const)); 
MOCK_METHOD(void, setUpstreamSslConnection, (const Ssl::ConnectionInfoConstSharedPtr&)); MOCK_METHOD(Ssl::ConnectionInfoConstSharedPtr, upstreamSslConnection, (), (const)); - MOCK_METHOD(const Router::RouteEntry*, routeEntry, (), (const)); + MOCK_METHOD(Router::RouteConstSharedPtr, route, (), (const)); MOCK_METHOD(envoy::config::core::v3::Metadata&, dynamicMetadata, ()); MOCK_METHOD(const envoy::config::core::v3::Metadata&, dynamicMetadata, (), (const)); MOCK_METHOD(void, setDynamicMetadata, (const std::string&, const ProtobufWkt::Struct&)); @@ -96,6 +94,10 @@ class MockStreamInfo : public StreamInfo { MOCK_METHOD(void, setConnectionID, (uint64_t)); MOCK_METHOD(void, setFilterChainName, (const absl::string_view)); MOCK_METHOD(const std::string&, filterChainName, (), (const)); + MOCK_METHOD(void, setUpstreamConnectionId, (uint64_t)); + MOCK_METHOD(absl::optional, upstreamConnectionId, (), (const)); + MOCK_METHOD(void, setAttemptCount, (uint32_t), ()); + MOCK_METHOD(absl::optional, attemptCount, (), (const)); std::shared_ptr> host_{ new testing::NiceMock()}; @@ -122,12 +124,14 @@ class MockStreamInfo : public StreamInfo { uint64_t bytes_received_{}; uint64_t bytes_sent_{}; Network::Address::InstanceConstSharedPtr upstream_local_address_; - std::shared_ptr downstream_address_provider_; + std::shared_ptr downstream_connection_info_provider_; Ssl::ConnectionInfoConstSharedPtr downstream_connection_info_; Ssl::ConnectionInfoConstSharedPtr upstream_connection_info_; std::string route_name_; std::string upstream_transport_failure_reason_; std::string filter_chain_name_; + absl::optional upstream_connection_id_; + absl::optional attempt_count_; }; } // namespace StreamInfo diff --git a/test/mocks/tcp/BUILD b/test/mocks/tcp/BUILD index b3cbd6903f68f..f61c5af7d0d68 100644 --- a/test/mocks/tcp/BUILD +++ b/test/mocks/tcp/BUILD @@ -16,6 +16,7 @@ envoy_cc_mock( "//envoy/buffer:buffer_interface", "//envoy/tcp:conn_pool_interface", "//test/mocks:common_lib", + 
"//test/mocks/network:io_handle_mocks", "//test/mocks/network:network_mocks", "//test/mocks/upstream:host_mocks", ], diff --git a/test/mocks/upstream/cluster_info.h b/test/mocks/upstream/cluster_info.h index 060806a376362..5e5415f88472e 100644 --- a/test/mocks/upstream/cluster_info.h +++ b/test/mocks/upstream/cluster_info.h @@ -105,6 +105,9 @@ class MockClusterInfo : public ClusterInfo { (const)); MOCK_METHOD(ProtocolOptionsConfigConstSharedPtr, extensionProtocolOptions, (const std::string&), (const)); + MOCK_METHOD(const envoy::config::cluster::v3::LoadBalancingPolicy_Policy&, loadBalancingPolicy, + (), (const)); + MOCK_METHOD(TypedLoadBalancerFactory*, loadBalancerFactory, (), (const)); MOCK_METHOD(const envoy::config::cluster::v3::Cluster::CommonLbConfig&, lbConfig, (), (const)); MOCK_METHOD(LoadBalancerType, lbType, (), (const)); MOCK_METHOD(envoy::config::cluster::v3::Cluster::DiscoveryType, type, (), (const)); diff --git a/test/mocks/upstream/cluster_manager.h b/test/mocks/upstream/cluster_manager.h index ff5649caf5170..f8b43ddb76557 100644 --- a/test/mocks/upstream/cluster_manager.h +++ b/test/mocks/upstream/cluster_manager.h @@ -68,6 +68,8 @@ class MockClusterManager : public ClusterManager { const ClusterTimeoutBudgetStatNames& clusterTimeoutBudgetStatNames() const override { return cluster_timeout_budget_stat_names_; } + MOCK_METHOD(void, drainConnections, (const std::string& cluster)); + MOCK_METHOD(void, drainConnections, ()); NiceMock thread_local_cluster_; envoy::config::core::v3::BindConfig bind_config_; diff --git a/test/mocks/upstream/priority_set.cc b/test/mocks/upstream/priority_set.cc index ac4613ac269bc..de2289ede221d 100644 --- a/test/mocks/upstream/priority_set.cc +++ b/test/mocks/upstream/priority_set.cc @@ -11,6 +11,7 @@ namespace Upstream { using ::testing::_; using ::testing::Invoke; +using ::testing::Return; using ::testing::ReturnRef; MockPrioritySet::MockPrioritySet() { @@ -25,6 +26,7 @@ MockPrioritySet::MockPrioritySet() { 
.WillByDefault(Invoke([this](PrioritySet::PriorityUpdateCb cb) -> Common::CallbackHandlePtr { return priority_update_cb_helper_.add(cb); })); + ON_CALL(*this, crossPriorityHostMap()).WillByDefault(Return(cross_priority_host_map_)); } MockPrioritySet::~MockPrioritySet() = default; diff --git a/test/mocks/upstream/priority_set.h b/test/mocks/upstream/priority_set.h index 8af92a8e19f86..cda2574854f1f 100644 --- a/test/mocks/upstream/priority_set.h +++ b/test/mocks/upstream/priority_set.h @@ -25,8 +25,10 @@ class MockPrioritySet : public PrioritySet { MOCK_METHOD(void, updateHosts, (uint32_t priority, UpdateHostsParams&& update_hosts_params, LocalityWeightsConstSharedPtr locality_weights, const HostVector& hosts_added, - const HostVector& hosts_removed, absl::optional overprovisioning_factor)); + const HostVector& hosts_removed, absl::optional overprovisioning_factor, + HostMapConstSharedPtr cross_priority_host_map)); MOCK_METHOD(void, batchHostUpdate, (BatchUpdateCb&)); + MOCK_METHOD(HostMapConstSharedPtr, crossPriorityHostMap, (), (const)); MockHostSet* getMockHostSet(uint32_t priority) { getHostSet(priority); // Ensure the host set exists. 
@@ -38,6 +40,8 @@ class MockPrioritySet : public PrioritySet { Common::CallbackManager member_update_cb_helper_; Common::CallbackManager priority_update_cb_helper_; + + HostMapConstSharedPtr cross_priority_host_map_{std::make_shared()}; }; } // namespace Upstream } // namespace Envoy diff --git a/test/per_file_coverage.sh b/test/per_file_coverage.sh index dc8fdd47c1ce1..49446ee5dc291 100755 --- a/test/per_file_coverage.sh +++ b/test/per_file_coverage.sh @@ -6,21 +6,21 @@ declare -a KNOWN_LOW_COVERAGE=( "source/common:96.5" # Raise when QUIC coverage goes up "source/common/api:75.3" "source/common/api/posix:73.9" -"source/common/common:96.3" "source/common/common/posix:94.1" "source/common/crypto:0.0" "source/common/event:94.2" # Emulated edge events guards don't report LCOV "source/common/filesystem/posix:96.2" "source/common/json:90.9" "source/common/network:95.0" # Flaky, `activateFileEvents`, `startSecureTransport` and `ioctl` do not always report LCOV -"source/common/protobuf:94.8" +"source/common/protobuf:94.7" "source/common/signal:84.5" # Death tests don't report LCOV "source/common/singleton:95.8" "source/common/thread:0.0" # Death tests don't report LCOV "source/common/matcher:95.0" -"source/common/quic:90.6" +"source/common/quic:91.2" "source/common/tracing:96.1" "source/common/watchdog:42.9" # Death tests don't report LCOV +"source/common/config/xds_mux:94.5" "source/exe:94.3" "source/extensions/common/crypto:91.5" "source/extensions/common/tap:95.9" @@ -29,9 +29,9 @@ declare -a KNOWN_LOW_COVERAGE=( "source/extensions/common/wasm/v8:85.4" "source/extensions/filters/common/expr:96.4" "source/extensions/filters/common/fault:94.6" -"source/extensions/filters/common/rbac:87.5" -"source/extensions/filters/http/cache:92.4" -"source/extensions/filters/http/cache/simple_http_cache:95.2" +"source/extensions/filters/common/rbac:88.6" +"source/extensions/filters/http/cache:92.6" +"source/extensions/filters/http/cache/simple_http_cache:95.6" 
"source/extensions/filters/http/grpc_json_transcoder:95.6" "source/extensions/filters/http/ip_tagging:91.2" "source/extensions/filters/http/kill_request:85.0" # Death tests don't report LCOV @@ -45,15 +45,14 @@ declare -a KNOWN_LOW_COVERAGE=( "source/extensions/health_checkers:95.9" "source/extensions/health_checkers/redis:95.9" "source/extensions/quic_listeners:85.1" -"source/extensions/stat_sinks/common/statsd:96.5" "source/extensions/stat_sinks/graphite_statsd:85.7" "source/extensions/stat_sinks/statsd:85.2" -"source/extensions/tracers/opencensus:92.5" -"source/extensions/tracers/xray:94.0" -"source/extensions/transport_sockets:95.7" +"source/extensions/tracers/opencensus:94.2" +"source/extensions/tracers/xray:96.4" +"source/extensions/transport_sockets:95.8" "source/extensions/transport_sockets/tls/cert_validator:96.5" "source/extensions/transport_sockets/tls/private_key:76.9" -"source/extensions/transport_sockets/tls:95.1" +"source/extensions/transport_sockets/tls:95.2" "source/extensions/wasm_runtime/wamr:0.0" # Not enabled in coverage build "source/extensions/wasm_runtime/wasmtime:0.0" # Not enabled in coverage build "source/extensions/wasm_runtime/wavm:0.0" # Not enabled in coverage build @@ -61,7 +60,7 @@ declare -a KNOWN_LOW_COVERAGE=( "source/extensions/watchdog/profile_action:85.7" "source/server:94.4" # flaky: be careful adjusting. 
See https://github.com/envoyproxy/envoy/issues/15239 "source/server/admin:95.8" -"source/server/config_validation:78.7" +"source/server/config_validation:79.2" ) [[ -z "${SRCDIR}" ]] && SRCDIR="${PWD}" diff --git a/test/server/BUILD b/test/server/BUILD index 15fdece73619b..952131ac020fe 100644 --- a/test/server/BUILD +++ b/test/server/BUILD @@ -322,6 +322,8 @@ envoy_cc_test( deps = [ ":listener_manager_impl_test_lib", ":utility_lib", + "//source/extensions/filters/http/router:config", + "//source/extensions/request_id/uuid:config", "//source/extensions/transport_sockets/raw_buffer:config", "//source/extensions/transport_sockets/tls:config", "//test/test_common:threadsafe_singleton_injector_lib", @@ -418,6 +420,7 @@ envoy_cc_test( "//source/server:server_lib", "//test/common/config:dummy_config_proto_cc_proto", "//test/common/stats:stat_test_utility_lib", + "//test/config:v2_link_hacks", "//test/integration:integration_lib", "//test/mocks/server:bootstrap_extension_factory_mocks", "//test/mocks/server:fatal_action_factory_mocks", @@ -496,3 +499,22 @@ envoy_benchmark_test( timeout = "long", benchmark_binary = "filter_chain_benchmark_test", ) + +envoy_cc_benchmark_binary( + name = "server_stats_flush_benchmark", + srcs = ["server_stats_flush_benchmark_test.cc"], + external_deps = [ + "benchmark", + ], + deps = [ + "//envoy/stats:stats_interface", + "//source/common/stats:thread_local_store_lib", + "//source/server:server_lib", + "//test/test_common:simulated_time_system_lib", + ], +) + +envoy_benchmark_test( + name = "server_stats_flush_benchmark_test", + benchmark_binary = "server_stats_flush_benchmark", +) diff --git a/test/server/active_tcp_listener_test.cc b/test/server/active_tcp_listener_test.cc index 8284559cd5ca8..2e6e2b807b03f 100644 --- a/test/server/active_tcp_listener_test.cc +++ b/test/server/active_tcp_listener_test.cc @@ -95,7 +95,7 @@ TEST_F(ActiveTcpListenerTest, PopulateSNIWhenActiveTcpSocketTimeout) { absl::string_view server_name = "envoy.io"; auto 
accepted_socket = std::make_unique>(); - accepted_socket->address_provider_->setRequestedServerName(server_name); + accepted_socket->connection_info_provider_->setRequestedServerName(server_name); // fake the socket is open. NiceMock io_handle; @@ -189,7 +189,7 @@ TEST_F(ActiveTcpListenerTest, RedirectedRebalancer) { })); EXPECT_CALL(*test_filter, onAccept(_)) .WillOnce(Invoke([&](Network::ListenerFilterCallbacks& cb) -> Network::FilterStatus { - cb.socket().addressProvider().restoreLocalAddress(alt_address); + cb.socket().connectionInfoProvider().restoreLocalAddress(alt_address); return Network::FilterStatus::Continue; })); // Verify that listener1 hands off the connection by not creating network filter chain. diff --git a/test/server/admin/admin_test.cc b/test/server/admin/admin_test.cc index 91a09d5f3533f..ff920e17b0eb0 100644 --- a/test/server/admin/admin_test.cc +++ b/test/server/admin/admin_test.cc @@ -58,7 +58,7 @@ TEST_P(AdminInstanceTest, WriteAddressToFile) { std::ifstream address_file(address_out_path_); std::string address_from_file; std::getline(address_file, address_from_file); - EXPECT_EQ(admin_.socket().addressProvider().localAddress()->asString(), address_from_file); + EXPECT_EQ(admin_.socket().connectionInfoProvider().localAddress()->asString(), address_from_file); } TEST_P(AdminInstanceTest, AdminAddress) { diff --git a/test/server/config_validation/BUILD b/test/server/config_validation/BUILD index 51ebda4ae37c4..626b0b9381f3c 100644 --- a/test/server/config_validation/BUILD +++ b/test/server/config_validation/BUILD @@ -48,6 +48,7 @@ envoy_cc_test( "//configs:example_configs", "//test/config_test:example_configs_test_setup.sh", ], + env = {"EXAMPLE_CONFIGS_TAR_PATH": "envoy/configs/example_configs.tar"}, deps = [ "//source/extensions/filters/http/router:config", "//source/extensions/filters/listener/original_dst:config", @@ -74,6 +75,7 @@ envoy_cc_test( "//test/test_common:environment_lib", "//test/test_common:network_utility_lib", 
"//test/test_common:test_time_lib", + "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", ], ) diff --git a/test/server/config_validation/dispatcher_test.cc b/test/server/config_validation/dispatcher_test.cc index 608135be22ae0..e72afe4cdc223 100644 --- a/test/server/config_validation/dispatcher_test.cc +++ b/test/server/config_validation/dispatcher_test.cc @@ -1,5 +1,7 @@ #include +#include "envoy/config/bootstrap/v3/bootstrap.pb.h" + #include "source/common/common/thread.h" #include "source/common/event/dispatcher_impl.h" #include "source/common/event/libevent.h" @@ -24,7 +26,7 @@ class ConfigValidation : public testing::TestWithParam( Thread::threadFactoryForTest(), stats_store_, test_time_.timeSystem(), - Filesystem::fileSystemForTest(), random_generator_); + Filesystem::fileSystemForTest(), random_generator_, bootstrap_); dispatcher_ = validation_->allocateDispatcher("test_thread"); } @@ -32,6 +34,7 @@ class ConfigValidation : public testing::TestWithParam random_generator_; + envoy::config::bootstrap::v3::Bootstrap bootstrap_; private: // Using config validation API. diff --git a/test/server/config_validation/xds_fuzz.cc b/test/server/config_validation/xds_fuzz.cc index 653acd8db012b..06bb9d1f387b2 100644 --- a/test/server/config_validation/xds_fuzz.cc +++ b/test/server/config_validation/xds_fuzz.cc @@ -12,26 +12,25 @@ namespace Envoy { // Helper functions to build API responses. 
envoy::config::cluster::v3::Cluster XdsFuzzTest::buildCluster(const std::string& name) { - return ConfigHelper::buildCluster(name, "ROUND_ROBIN", api_version_); + return ConfigHelper::buildCluster(name, "ROUND_ROBIN"); }; envoy::config::endpoint::v3::ClusterLoadAssignment XdsFuzzTest::buildClusterLoadAssignment(const std::string& name) { return ConfigHelper::buildClusterLoadAssignment( name, Network::Test::getLoopbackAddressString(ip_version_), - fake_upstreams_[0]->localAddress()->ip()->port(), api_version_); + fake_upstreams_[0]->localAddress()->ip()->port()); } envoy::config::listener::v3::Listener XdsFuzzTest::buildListener(const std::string& listener_name, const std::string& route_name) { - return ConfigHelper::buildListener(listener_name, route_name, - Network::Test::getLoopbackAddressString(ip_version_), - "ads_test", api_version_); + return ConfigHelper::buildListener( + listener_name, route_name, Network::Test::getLoopbackAddressString(ip_version_), "ads_test"); } envoy::config::route::v3::RouteConfiguration XdsFuzzTest::buildRouteConfig(const std::string& route_name) { - return ConfigHelper::buildRouteConfig(route_name, "cluster_0", api_version_); + return ConfigHelper::buildRouteConfig(route_name, "cluster_0"); } // Helper functions to send API responses. @@ -55,17 +54,15 @@ void XdsFuzzTest::updateRoute( std::to_string(version_)); } -XdsFuzzTest::XdsFuzzTest(const test::server::config_validation::XdsTestCase& input, - envoy::config::core::v3::ApiVersion api_version) +XdsFuzzTest::XdsFuzzTest(const test::server::config_validation::XdsTestCase& input) : HttpIntegrationTest( Http::CodecType::HTTP2, TestEnvironment::getIpVersionsForTest()[0], ConfigHelper::adsBootstrap(input.config().sotw_or_delta() == test::server::config_validation::Config::SOTW ? 
"GRPC" - : "DELTA_GRPC", - api_version)), + : "DELTA_GRPC")), verifier_(input.config().sotw_or_delta()), actions_(input.actions()), version_(1), - api_version_(api_version), ip_version_(TestEnvironment::getIpVersionsForTest()[0]) { + ip_version_(TestEnvironment::getIpVersionsForTest()[0]) { use_lds_ = false; create_xds_upstream_ = true; tls_xds_upstream_ = false; diff --git a/test/server/config_validation/xds_fuzz.h b/test/server/config_validation/xds_fuzz.h index 602c312ee8a03..42c23959ccb73 100644 --- a/test/server/config_validation/xds_fuzz.h +++ b/test/server/config_validation/xds_fuzz.h @@ -22,8 +22,7 @@ namespace Envoy { class XdsFuzzTest : public HttpIntegrationTest { public: - XdsFuzzTest(const test::server::config_validation::XdsTestCase& input, - envoy::config::core::v3::ApiVersion api_version); + XdsFuzzTest(const test::server::config_validation::XdsTestCase& input); envoy::config::cluster::v3::Cluster buildCluster(const std::string& name); @@ -77,7 +76,6 @@ class XdsFuzzTest : public HttpIntegrationTest { std::vector listeners_; uint64_t version_; - envoy::config::core::v3::ApiVersion api_version_; Network::Address::IpVersion ip_version_; diff --git a/test/server/config_validation/xds_fuzz_test.cc b/test/server/config_validation/xds_fuzz_test.cc index 5b41d4a01bbbd..231100d2972b9 100644 --- a/test/server/config_validation/xds_fuzz_test.cc +++ b/test/server/config_validation/xds_fuzz_test.cc @@ -12,7 +12,7 @@ DEFINE_PROTO_FUZZER(const test::server::config_validation::XdsTestCase& input) { ENVOY_LOG_MISC(debug, "ProtoValidationException: {}", e.what()); return; } - XdsFuzzTest test(input, envoy::config::core::v3::ApiVersion::V3); + XdsFuzzTest test(input); test.replay(); } diff --git a/test/server/config_validation/xds_verifier_test.cc b/test/server/config_validation/xds_verifier_test.cc index 3e65220687e7a..f0cbef7fdc6b7 100644 --- a/test/server/config_validation/xds_verifier_test.cc +++ b/test/server/config_validation/xds_verifier_test.cc @@ -7,13 +7,11 
@@ namespace Envoy { envoy::config::listener::v3::Listener buildListener(const std::string& listener_name, const std::string& route_name) { - return ConfigHelper::buildListener(listener_name, route_name, "", "ads_test", - envoy::config::core::v3::ApiVersion::V3); + return ConfigHelper::buildListener(listener_name, route_name, "", "ads_test"); } envoy::config::route::v3::RouteConfiguration buildRoute(const std::string& route_name) { - return ConfigHelper::buildRouteConfig(route_name, "cluster_0", - envoy::config::core::v3::ApiVersion::V3); + return ConfigHelper::buildRouteConfig(route_name, "cluster_0"); } // Add, warm, drain and remove a listener. diff --git a/test/server/configuration_impl_test.cc b/test/server/configuration_impl_test.cc index f2f02f385bc00..8862f46df622a 100644 --- a/test/server/configuration_impl_test.cc +++ b/test/server/configuration_impl_test.cc @@ -612,9 +612,8 @@ TEST(InitialImplTest, LayeredRuntime) { admin_layer: {} )EOF"; const auto bootstrap = TestUtility::parseYaml(yaml); - NiceMock options; NiceMock server; - InitialImpl config(bootstrap, options); + InitialImpl config(bootstrap); EXPECT_THAT(config.runtime(), ProtoEq(bootstrap.layered_runtime())); } @@ -625,9 +624,8 @@ TEST(InitialImplTest, EmptyLayeredRuntime) { )EOF"; const auto bootstrap = TestUtility::parseYaml(bootstrap_yaml); - NiceMock options; NiceMock server; - InitialImpl config(bootstrap, options); + InitialImpl config(bootstrap); const std::string expected_yaml = R"EOF( layers: @@ -676,9 +674,8 @@ TEST_F(ConfigurationImplTest, AdminSocketOptions) { )EOF"; auto bootstrap = Upstream::parseBootstrapFromV3Json(json); - NiceMock options; NiceMock server; - InitialImpl config(bootstrap, options); + InitialImpl config(bootstrap); config.initAdminAccessLog(bootstrap, server_); Network::MockListenSocket socket_mock; @@ -717,9 +714,8 @@ TEST_F(ConfigurationImplTest, FileAccessLogOutput) { )EOF"; auto bootstrap = Upstream::parseBootstrapFromV3Json(json); - NiceMock options; NiceMock 
server; - InitialImpl config(bootstrap, options); + InitialImpl config(bootstrap); config.initAdminAccessLog(bootstrap, server_); Network::MockListenSocket socket_mock; @@ -1041,9 +1037,8 @@ TEST_F(ConfigurationImplTest, DEPRECATED_FEATURE_TEST(DeprecatedAccessLogPathWit )EOF"; auto bootstrap = Upstream::parseBootstrapFromV3Json(json); - NiceMock options; NiceMock server; - InitialImpl config(bootstrap, options); + InitialImpl config(bootstrap); config.initAdminAccessLog(bootstrap, server_); Network::MockListenSocket socket_mock; @@ -1078,8 +1073,7 @@ TEST_F(ConfigurationImplTest, AccessLogWithFilter) { )EOF"; auto bootstrap = Upstream::parseBootstrapFromV3Json(json); - NiceMock options; - InitialImpl config(bootstrap, options); + InitialImpl config(bootstrap); config.initAdminAccessLog(bootstrap, server_); ASSERT_EQ(config.admin().accessLogs().size(), 1); @@ -1114,8 +1108,7 @@ TEST_F(ConfigurationImplTest, DEPRECATED_FEATURE_TEST(DeprecatedAccessLogPathWit )EOF"; auto bootstrap = Upstream::parseBootstrapFromV3Json(json); - NiceMock options; - InitialImpl config(bootstrap, options); + InitialImpl config(bootstrap); config.initAdminAccessLog(bootstrap, server_); ASSERT_EQ(config.admin().accessLogs().size(), 2); @@ -1129,8 +1122,7 @@ TEST_F(ConfigurationImplTest, EmptyAdmin) { )EOF"; auto bootstrap = Upstream::parseBootstrapFromV3Json(json); - NiceMock options; - InitialImpl config(bootstrap, options); + InitialImpl config(bootstrap); config.initAdminAccessLog(bootstrap, server_); ASSERT_EQ(config.admin().accessLogs().size(), 0); @@ -1152,9 +1144,8 @@ TEST_F(ConfigurationImplTest, DEPRECATED_FEATURE_TEST(DeprecatedAccessLogPath)) )EOF"; auto bootstrap = Upstream::parseBootstrapFromV3Json(json); - NiceMock options; NiceMock server; - InitialImpl config(bootstrap, options); + InitialImpl config(bootstrap); config.initAdminAccessLog(bootstrap, server_); Network::MockListenSocket socket_mock; diff --git a/test/server/connection_handler_test.cc 
b/test/server/connection_handler_test.cc index c911742fa469e..442c38adc9999 100644 --- a/test/server/connection_handler_test.cc +++ b/test/server/connection_handler_test.cc @@ -280,7 +280,7 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable Network::FilterStatus { - cb.socket().addressProvider().restoreLocalAddress(original_dst_address); + cb.socket().connectionInfoProvider().restoreLocalAddress(original_dst_address); return Network::FilterStatus::Continue; })); EXPECT_CALL(*test_filter, destroy_()); @@ -557,7 +557,7 @@ TEST_F(ConnectionHandlerTest, SetsTransportSocketConnectTimeout) { EXPECT_CALL(*filter_chain_, transportSocketConnectTimeout) .WillOnce(Return(std::chrono::seconds(5))); EXPECT_CALL(*server_connection, - setTransportSocketConnectTimeout(std::chrono::milliseconds(5 * 1000))); + setTransportSocketConnectTimeout(std::chrono::milliseconds(5 * 1000), _)); EXPECT_CALL(*access_log_, log(_, _, _, _)); listener_callbacks->onAccept(std::make_unique>()); @@ -669,7 +669,7 @@ TEST_F(ConnectionHandlerTest, NormalRedirect) { })); EXPECT_CALL(*test_filter, onAccept(_)) .WillOnce(Invoke([&](Network::ListenerFilterCallbacks& cb) -> Network::FilterStatus { - cb.socket().addressProvider().restoreLocalAddress(alt_address); + cb.socket().connectionInfoProvider().restoreLocalAddress(alt_address); return Network::FilterStatus::Continue; })); EXPECT_CALL(manager_, findFilterChain(_)).WillOnce(Return(filter_chain_.get())); @@ -739,7 +739,7 @@ TEST_F(ConnectionHandlerTest, FallbackToWildcardListener) { new Network::Address::Ipv4Instance("127.0.0.2", 0, nullptr)); EXPECT_CALL(*test_filter, onAccept(_)) .WillOnce(Invoke([&](Network::ListenerFilterCallbacks& cb) -> Network::FilterStatus { - cb.socket().addressProvider().restoreLocalAddress(alt_address); + cb.socket().connectionInfoProvider().restoreLocalAddress(alt_address); return Network::FilterStatus::Continue; })); EXPECT_CALL(manager_, findFilterChain(_)).WillOnce(Return(filter_chain_.get())); 
@@ -817,7 +817,7 @@ TEST_F(ConnectionHandlerTest, OldBehaviorMatchFirstWildcardListener) { new Network::Address::Ipv6Instance("::2", 80, nullptr)); EXPECT_CALL(*test_filter, onAccept(_)) .WillOnce(Invoke([&](Network::ListenerFilterCallbacks& cb) -> Network::FilterStatus { - cb.socket().addressProvider().restoreLocalAddress(alt_address); + cb.socket().connectionInfoProvider().restoreLocalAddress(alt_address); return Network::FilterStatus::Continue; })); EXPECT_CALL(manager_, findFilterChain(_)).Times(0); @@ -897,7 +897,7 @@ TEST_F(ConnectionHandlerTest, MatchIPv6WildcardListener) { new Network::Address::Ipv6Instance("::2", 80, nullptr)); EXPECT_CALL(*test_filter, onAccept(_)) .WillOnce(Invoke([&](Network::ListenerFilterCallbacks& cb) -> Network::FilterStatus { - cb.socket().addressProvider().restoreLocalAddress(alt_address); + cb.socket().connectionInfoProvider().restoreLocalAddress(alt_address); return Network::FilterStatus::Continue; })); EXPECT_CALL(manager_, findFilterChain(_)).Times(0); diff --git a/test/server/filter_chain_benchmark_test.cc b/test/server/filter_chain_benchmark_test.cc index 89569264633b0..919085fdcb945 100644 --- a/test/server/filter_chain_benchmark_test.cc +++ b/test/server/filter_chain_benchmark_test.cc @@ -39,7 +39,8 @@ class MockFilterChainFactoryBuilder : public FilterChainFactoryBuilder { class MockConnectionSocket : public Network::ConnectionSocket { public: MockConnectionSocket() - : address_provider_(std::make_shared(nullptr, nullptr)) {} + : connection_info_provider_( + std::make_shared(nullptr, nullptr)) {} static std::unique_ptr createMockConnectionSocket(uint16_t destination_port, const std::string& destination_address, @@ -49,19 +50,19 @@ class MockConnectionSocket : public Network::ConnectionSocket { auto res = std::make_unique(); if (absl::StartsWith(destination_address, "/")) { - res->address_provider_->setLocalAddress( + res->connection_info_provider_->setLocalAddress( std::make_shared(destination_address)); } else { - 
res->address_provider_->setLocalAddress( + res->connection_info_provider_->setLocalAddress( Network::Utility::parseInternetAddress(destination_address, destination_port)); } if (absl::StartsWith(source_address, "/")) { - res->address_provider_->setRemoteAddress( + res->connection_info_provider_->setRemoteAddress( std::make_shared(source_address)); } else { - res->address_provider_->setRemoteAddress( + res->connection_info_provider_->setRemoteAddress( Network::Utility::parseInternetAddress(source_address, source_port)); - res->address_provider_->setDirectRemoteAddressForTest( + res->connection_info_provider_->setDirectRemoteAddressForTest( Network::Utility::parseInternetAddress(source_address, source_port)); } res->server_name_ = server_name; @@ -75,12 +76,14 @@ class MockConnectionSocket : public Network::ConnectionSocket { const std::vector& requestedApplicationProtocols() const override { return application_protocols_; } - Network::SocketAddressSetter& addressProvider() override { return *address_provider_; } - const Network::SocketAddressSetter& addressProvider() const override { - return *address_provider_; + Network::ConnectionInfoSetter& connectionInfoProvider() override { + return *connection_info_provider_; } - Network::SocketAddressProviderSharedPtr addressProviderSharedPtr() const override { - return address_provider_; + const Network::ConnectionInfoSetter& connectionInfoProvider() const override { + return *connection_info_provider_; + } + Network::ConnectionInfoProviderSharedPtr connectionInfoProviderSharedPtr() const override { + return connection_info_provider_; } // Wont call @@ -92,7 +95,7 @@ class MockConnectionSocket : public Network::ConnectionSocket { bool isOpen() const override { return false; } Network::Socket::Type socketType() const override { return Network::Socket::Type::Stream; } Network::Address::Type addressType() const override { - return address_provider_->localAddress()->type(); + return 
connection_info_provider_->localAddress()->type(); } absl::optional ipVersion() const override { return Network::Address::IpVersion::v4; @@ -126,7 +129,7 @@ class MockConnectionSocket : public Network::ConnectionSocket { private: Network::IoHandlePtr io_handle_; OptionsSharedPtr options_; - std::shared_ptr address_provider_; + std::shared_ptr connection_info_provider_; std::string server_name_; std::string transport_protocol_; std::vector application_protocols_; diff --git a/test/server/filter_chain_manager_impl_test.cc b/test/server/filter_chain_manager_impl_test.cc index a329d6e86214c..e62d8c399da0f 100644 --- a/test/server/filter_chain_manager_impl_test.cc +++ b/test/server/filter_chain_manager_impl_test.cc @@ -79,7 +79,7 @@ class FilterChainManagerImplTest : public testing::Test { local_address_ = Network::Utility::parseInternetAddress(destination_address, destination_port); } - mock_socket->address_provider_->setLocalAddress(local_address_); + mock_socket->connection_info_provider_->setLocalAddress(local_address_); ON_CALL(*mock_socket, requestedServerName()) .WillByDefault(Return(absl::AsciiStrToLower(server_name))); @@ -93,7 +93,7 @@ class FilterChainManagerImplTest : public testing::Test { } else { remote_address_ = Network::Utility::parseInternetAddress(source_address, source_port); } - mock_socket->address_provider_->setRemoteAddress(remote_address_); + mock_socket->connection_info_provider_->setRemoteAddress(remote_address_); return filter_chain_manager_.findFilterChain(*mock_socket); } diff --git a/test/server/listener_manager_impl_quic_only_test.cc b/test/server/listener_manager_impl_quic_only_test.cc index 6f1f08fe4fa78..84e1a3a200c95 100644 --- a/test/server/listener_manager_impl_quic_only_test.cc +++ b/test/server/listener_manager_impl_quic_only_test.cc @@ -36,7 +36,16 @@ TEST_F(ListenerManagerImplQuicOnlyTest, QuicListenerFactoryAndSslContext) { filter_chains: - filter_chain_match: transport_protocol: "quic" - filters: [] + filters: + - name: 
envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + codec_type: HTTP3 + stat_prefix: hcm + route_config: + name: local_route + http_filters: + - name: envoy.filters.http.router transport_socket: name: envoy.transport_sockets.quic typed_config: @@ -171,6 +180,107 @@ TEST_F(ListenerManagerImplQuicOnlyTest, QuicListenerFactoryWithWrongTransportSoc #endif } +TEST_F(ListenerManagerImplQuicOnlyTest, QuicListenerFactoryWithWrongCodec) { + const std::string yaml = TestEnvironment::substitute(R"EOF( +address: + socket_address: + address: 127.0.0.1 + protocol: UDP + port_value: 1234 +filter_chains: +- filter_chain_match: + transport_protocol: "quic" + filters: [] + transport_socket: + name: envoy.transport_sockets.quic + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.quic.v3.QuicDownstreamTransport + downstream_tls_context: + common_tls_context: + tls_certificates: + - certificate_chain: + filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem" + private_key: + filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem" + validation_context: + trusted_ca: + filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem" + match_subject_alt_names: + - exact: localhost + - exact: 127.0.0.1 +udp_listener_config: + quic_options: {} + )EOF", + Network::Address::IpVersion::v4); + + envoy::config::listener::v3::Listener listener_proto = parseListenerFromV3Yaml(yaml); + +#if defined(ENVOY_ENABLE_QUIC) + EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(listener_proto, "", true), EnvoyException, + "error building network filter chain for quic listener: requires exactly " + "one http_connection_manager filter."); +#else + EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(listener_proto, "", true), EnvoyException, + "QUIC is 
configured but not enabled in the build."); +#endif +} + +TEST_F(ListenerManagerImplQuicOnlyTest, QuicListenerFactoryWithConnectionBalencer) { + const std::string yaml = TestEnvironment::substitute(R"EOF( +address: + socket_address: + address: 127.0.0.1 + protocol: UDP + port_value: 1234 +filter_chains: +- filter_chain_match: + transport_protocol: "quic" + filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + codec_type: HTTP3 + stat_prefix: hcm + route_config: + name: local_route + http_filters: + - name: envoy.filters.http.router + transport_socket: + name: envoy.transport_sockets.quic + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.quic.v3.QuicDownstreamTransport + downstream_tls_context: + common_tls_context: + tls_certificates: + - certificate_chain: + filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem" + private_key: + filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem" + validation_context: + trusted_ca: + filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem" + match_subject_alt_names: + - exact: localhost + - exact: 127.0.0.1 +udp_listener_config: + quic_options: {} +connection_balance_config: + exact_balance: {} + )EOF", + Network::Address::IpVersion::v4); + + envoy::config::listener::v3::Listener listener_proto = parseListenerFromV3Yaml(yaml); + +#if defined(ENVOY_ENABLE_QUIC) + EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(listener_proto, "", true), EnvoyException, + "connection_balance_config is configured for QUIC listener which doesn't " + "work with connection balancer."); +#else + EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(listener_proto, "", true), EnvoyException, + "QUIC is configured but not enabled in the build."); +#endif +} + } // 
namespace } // namespace Server } // namespace Envoy diff --git a/test/server/listener_manager_impl_test.cc b/test/server/listener_manager_impl_test.cc index 8a8139fa33275..43f0e9b0269e7 100644 --- a/test/server/listener_manager_impl_test.cc +++ b/test/server/listener_manager_impl_test.cc @@ -258,44 +258,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TlsTransportSocket) { EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport()); } -TEST_F(ListenerManagerImplWithRealFiltersTest, - DEPRECATED_FEATURE_TEST(TlsTransportSocketLegacyConfig)) { - TestDeprecatedV2Api _deprecated_v2_api; - const std::string yaml = TestEnvironment::substitute(R"EOF( -address: - socket_address: - address: 127.0.0.1 - port_value: 1234 -filter_chains: -- filters: [] - transport_socket: - name: tls - typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext - common_tls_context: - tls_certificates: - - certificate_chain: - filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem" - private_key: - filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem" - validation_context: - trusted_ca: - filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem" - verify_subject_alt_name: - - localhost - - 127.0.0.1 - )EOF", - Network::Address::IpVersion::v4); - - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); - manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); - EXPECT_EQ(1U, manager_->listeners().size()); - - auto filter_chain = findFilterChain(1234, "127.0.0.1", "", "tls", {}, "8.8.8.8", 111); - ASSERT_NE(filter_chain, nullptr); - EXPECT_TRUE(filter_chain->transportSocketFactory().implementsSecureTransport()); -} - TEST_F(ListenerManagerImplWithRealFiltersTest, TransportSocketConnectTimeout) { const std::string yaml = R"EOF( address: @@ -1554,7 +1516,7 @@ name: foo 
Network::Address::InstanceConstSharedPtr local_address( new Network::Address::Ipv4Instance("127.0.0.1", 1234)); - listener_factory_.socket_->address_provider_->setLocalAddress(local_address); + listener_factory_.socket_->connection_info_provider_->setLocalAddress(local_address); ListenerHandle* listener_foo = expectListenerCreate(false, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); @@ -1614,7 +1576,7 @@ name: foo Network::Address::InstanceConstSharedPtr local_address( new Network::Address::Ipv4Instance("127.0.0.1", 1234)); - listener_factory_.socket_->address_provider_->setLocalAddress(local_address); + listener_factory_.socket_->connection_info_provider_->setLocalAddress(local_address); ListenerHandle* listener_foo = expectListenerCreate(false, true); EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); @@ -1837,6 +1799,37 @@ TEST_F(ListenerManagerImplTest, NotSupportedDatagramUds) { "socket type SocketType::Datagram not supported for pipes"); } +TEST_F(ListenerManagerImplTest, CantListen) { + InSequence s; + + EXPECT_CALL(*worker_, start(_, _)); + manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); + + const std::string listener_foo_yaml = R"EOF( +name: foo +address: + socket_address: + address: 127.0.0.1 + port_value: 1234 +filter_chains: +- filters: [] + )EOF"; + + ListenerHandle* listener_foo = expectListenerCreate(true, true); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); + EXPECT_CALL(listener_foo->target_, initialize()); + manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true); + + EXPECT_CALL(*listener_factory_.socket_->io_handle_, listen(_)) + .WillOnce(Return(Api::SysCallIntResult{-1, 100})); + EXPECT_CALL(*listener_foo, onDestroy()); + listener_foo->target_.ready(); + + EXPECT_EQ( + 1UL, + server_.stats_store_.counterFromString("listener_manager.listener_create_failure").value()); +} + 
TEST_F(ListenerManagerImplTest, CantBindSocket) { time_system_.setSystemTime(std::chrono::milliseconds(1001001001001)); InSequence s; @@ -3703,12 +3696,8 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TlsCertificateIncomplete) { )EOF", Network::Address::IpVersion::v4); - EXPECT_THROW_WITH_MESSAGE( - manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, - TestEnvironment::substitute( - "Failed to load incomplete certificate from {{ test_rundir }}" - "/test/extensions/transport_sockets/tls/test_data/san_dns3_chain.pem, ", - Network::Address::IpVersion::v4)); + EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), + EnvoyException, "Failed to load incomplete private key from path: "); } TEST_F(ListenerManagerImplWithRealFiltersTest, TlsCertificateInvalidCertificateChain) { @@ -4037,8 +4026,8 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstTestFilterOutbound) { })); EXPECT_TRUE(filterChainFactory.createListenerFilterChain(manager)); - EXPECT_TRUE(socket.addressProvider().localAddressRestored()); - EXPECT_EQ("127.0.0.2:2345", socket.addressProvider().localAddress()->asString()); + EXPECT_TRUE(socket.connectionInfoProvider().localAddressRestored()); + EXPECT_EQ("127.0.0.2:2345", socket.connectionInfoProvider().localAddress()->asString()); #endif } @@ -4134,8 +4123,8 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstTestFilterInbound) { })); EXPECT_TRUE(filterChainFactory.createListenerFilterChain(manager)); - EXPECT_TRUE(socket.addressProvider().localAddressRestored()); - EXPECT_EQ("127.0.0.2:2345", socket.addressProvider().localAddress()->asString()); + EXPECT_TRUE(socket.connectionInfoProvider().localAddressRestored()); + EXPECT_EQ("127.0.0.2:2345", socket.connectionInfoProvider().localAddress()->asString()); #endif } @@ -4215,8 +4204,8 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstTestFilterIPv6) { })); 
EXPECT_TRUE(filterChainFactory.createListenerFilterChain(manager)); - EXPECT_TRUE(socket.addressProvider().localAddressRestored()); - EXPECT_EQ("[1::2]:2345", socket.addressProvider().localAddress()->asString()); + EXPECT_TRUE(socket.connectionInfoProvider().localAddressRestored()); + EXPECT_EQ("[1::2]:2345", socket.connectionInfoProvider().localAddress()->asString()); } // Validate that when neither transparent nor freebind is not set in the diff --git a/test/server/listener_manager_impl_test.h b/test/server/listener_manager_impl_test.h index 077c221b4fc0b..e3869f80d5f70 100644 --- a/test/server/listener_manager_impl_test.h +++ b/test/server/listener_manager_impl_test.h @@ -194,7 +194,7 @@ class ListenerManagerImplTest : public testing::Test { local_address_ = Network::Utility::parseInternetAddress(destination_address, destination_port); } - socket_->address_provider_->setLocalAddress(local_address_); + socket_->connection_info_provider_->setLocalAddress(local_address_); ON_CALL(*socket_, requestedServerName()).WillByDefault(Return(absl::string_view(server_name))); ON_CALL(*socket_, detectedTransportProtocol()) @@ -207,7 +207,7 @@ class ListenerManagerImplTest : public testing::Test { } else { remote_address_ = Network::Utility::parseInternetAddress(source_address, source_port); } - socket_->address_provider_->setRemoteAddress(remote_address_); + socket_->connection_info_provider_->setRemoteAddress(remote_address_); if (direct_source_address.empty()) { direct_source_address = source_address; @@ -219,7 +219,7 @@ class ListenerManagerImplTest : public testing::Test { direct_remote_address_ = Network::Utility::parseInternetAddress(direct_source_address, source_port); } - socket_->address_provider_->setDirectRemoteAddressForTest(direct_remote_address_); + socket_->connection_info_provider_->setDirectRemoteAddressForTest(direct_remote_address_); return manager_->listeners().back().get().filterChainManager().findFilterChain(*socket_); } diff --git 
a/test/server/options_impl_test.cc b/test/server/options_impl_test.cc index b0c744cfde56d..07c5659aece2a 100644 --- a/test/server/options_impl_test.cc +++ b/test/server/options_impl_test.cc @@ -635,24 +635,5 @@ TEST(DisableExtensions, DEPRECATED_FEATURE_TEST(IsDisabled)) { nullptr); } -TEST(FactoryByTypeTest, EarlierVersionConfigType) { - envoy::config::filter::http::buffer::v2::Buffer v2_config; - auto factory = Registry::FactoryRegistry:: - getFactoryByType(v2_config.GetDescriptor()->full_name()); - EXPECT_NE(factory, nullptr); - EXPECT_EQ(factory->name(), "envoy.filters.http.buffer"); - - envoy::extensions::filters::http::buffer::v3::Buffer v3_config; - factory = Registry::FactoryRegistry:: - getFactoryByType(v3_config.GetDescriptor()->full_name()); - EXPECT_NE(factory, nullptr); - EXPECT_EQ(factory->name(), "envoy.filters.http.buffer"); - - ProtobufWkt::Any non_api_type; - factory = Registry::FactoryRegistry:: - getFactoryByType(non_api_type.GetDescriptor()->full_name()); - EXPECT_EQ(factory, nullptr); -} - } // namespace } // namespace Envoy diff --git a/test/server/overload_manager_impl_test.cc b/test/server/overload_manager_impl_test.cc index 7bad384afdd99..60b2214487c8e 100644 --- a/test/server/overload_manager_impl_test.cc +++ b/test/server/overload_manager_impl_test.cc @@ -691,6 +691,16 @@ TEST_F(OverloadManagerImplTest, DuplicateTrigger) { EXPECT_THROW_WITH_REGEX(createOverloadManager(config), EnvoyException, "Duplicate trigger .*"); } +TEST_F(OverloadManagerImplTest, ShouldThrowIfUsingResetStreamsWithoutBufferFactoryConfig) { + const std::string lower_greater_than_upper_config = R"EOF( + actions: + - name: envoy.overload_actions.reset_high_memory_stream + )EOF"; + + EXPECT_THROW_WITH_REGEX(createOverloadManager(lower_greater_than_upper_config), EnvoyException, + "Overload action .* requires buffer_factory_config."); +} + TEST_F(OverloadManagerImplTest, Shutdown) { setDispatcherExpectation(); diff --git a/test/server/server_corpus/dns_resolver_refresh 
b/test/server/server_corpus/dns_resolver_refresh new file mode 100644 index 0000000000000..76f5dee4e2c29 --- /dev/null +++ b/test/server/server_corpus/dns_resolver_refresh @@ -0,0 +1,197 @@ +static_resources { + clusters { + name: "ser" + type: STRICT_DNS + connect_timeout { + seconds: 1025 + nanos: 262239 + } + lb_policy: RING_HASH + health_checks { + timeout { + nanos: 262239 + } + interval { + seconds: 10838081697 + nanos: 2655 + } + unhealthy_threshold { + } + healthy_threshold { + } + http_health_check { + host: "\037\037\037\037\037\037\037\037\037\037\037\037\037\037\037\037\037\037\037\037\037\037\037\001\037\037\037\037\037\037\037\037\037\037\037\037\037f\037\037\037\037" + path: "&" + } + no_traffic_interval { + nanos: 32 + } + healthy_edge_interval { + nanos: 95 + } + interval_jitter_percent: 687361 + tls_options { + alpn_protocols: "" + } + no_traffic_healthy_interval { + seconds: 1025 + nanos: 95 + } + } + dns_refresh_rate { + nanos: 2555904 + } + dns_resolvers { + socket_address { + address: "127.0.0.1" + port_value: 9901 + } + } + lb_subset_config { + subset_selectors { + } + } + metadata { + } + common_lb_config { + healthy_panic_threshold { + value: 1.39067116059563e-309 + } + ignore_new_hosts_until_first_hc: true + consistent_hashing_lb_config { + hash_balance_factor { + value: 2752512 + } + } + } + load_assignment { + cluster_name: "." 
+ endpoints { + locality { + } + lb_endpoints { + endpoint { + address { + socket_address { + protocol: UDP + address: "127.0.0.1" + port_value: 16 + } + } + } + health_status: DEGRADED + load_balancing_weight { + value: 1 + } + } + proximity { + value: 8388608 + } + } + endpoints { + lb_endpoints { + health_status: UNHEALTHY + } + proximity { + value: 50 + } + } + endpoints { + locality { + } + priority: 1 + } + endpoints { + locality { + } + lb_endpoints { + health_status: HEALTHY + } + lb_endpoints { + endpoint { + address { + socket_address { + address: "\025" + ipv4_compat: true + } + } + health_check_config { + hostname: "\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177\177" + } + } + health_status: HEALTHY + } + lb_endpoints { + endpoint { + address { + socket_address { + protocol: UDP + address: "127.0.0.1" + port_value: 16 + } + } + } + health_status: HEALTHY + } + priority: 16 + } + endpoints { + locality { + } + lb_endpoints { + metadata { + filter_metadata { + key: "" + value { + } + } + filter_metadata { + key: "\025" + value { + } + } + filter_metadata { + key: "l" + value { + } + } + } + load_balancing_weight { + value: 1 + } + } + priority: 1 + } + } + respect_dns_ttl: true + load_balancing_policy { + } + upstream_http_protocol_options { + } + track_timeout_budgets: true + preconnect_policy { + } + } +} +dynamic_resources { +} +flags_path: "." 
+admin { +} +enable_dispatcher_stats: true +stats_server_version_override { +} +default_config_source { + self { + } +} +watchdogs { + worker_watchdog { + miss_timeout { + nanos: 262239 + } + multikill_timeout { + seconds: 2949120 + } + } +} diff --git a/test/server/server_stats_flush_benchmark_test.cc b/test/server/server_stats_flush_benchmark_test.cc new file mode 100644 index 0000000000000..98790c5c65d56 --- /dev/null +++ b/test/server/server_stats_flush_benchmark_test.cc @@ -0,0 +1,74 @@ +#include +#include + +#include "envoy/stats/sink.h" +#include "envoy/stats/stats.h" + +#include "source/common/stats/thread_local_store.h" +#include "source/server/server.h" + +#include "test/benchmark/main.h" +#include "test/mocks/stats/mocks.h" +#include "test/test_common/simulated_time_system.h" +#include "test/test_common/utility.h" + +#include "absl/strings/str_cat.h" +#include "benchmark/benchmark.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { + +class StatsSinkFlushSpeedTest { +public: + StatsSinkFlushSpeedTest(size_t const num_stats) + : pool_(symbol_table_), stats_allocator_(symbol_table_), stats_store_(stats_allocator_) { + + // Create counters + for (uint64_t idx = 0; idx < num_stats; ++idx) { + auto stat_name = pool_.add(absl::StrCat("counter.", idx)); + stats_store_.counterFromStatName(stat_name).inc(); + } + // Create gauges + for (uint64_t idx = 0; idx < num_stats; ++idx) { + auto stat_name = pool_.add(absl::StrCat("gauge.", idx)); + stats_store_.gaugeFromStatName(stat_name, Stats::Gauge::ImportMode::NeverImport).set(idx); + } + + // Create text readouts + for (uint64_t idx = 0; idx < num_stats; ++idx) { + auto stat_name = pool_.add(absl::StrCat("text_readout.", idx)); + stats_store_.textReadoutFromStatName(stat_name).set(absl::StrCat("text_readout.", idx)); + } + } + + void test(::benchmark::State& state) { + for (auto _ : state) { + UNREFERENCED_PARAMETER(_); + std::list sinks; + sinks.emplace_back(new testing::NiceMock()); + 
Server::InstanceUtil::flushMetricsToSinks(sinks, stats_store_, time_system_); + } + } + +private: + Stats::SymbolTableImpl symbol_table_; + Stats::StatNamePool pool_; + Stats::AllocatorImpl stats_allocator_; + Stats::ThreadLocalStoreImpl stats_store_; + Event::SimulatedTimeSystem time_system_; +}; + +static void bmFlushToSinks(::benchmark::State& state) { + // Skip expensive benchmarks for unit tests. + if (benchmark::skipExpensiveBenchmarks() && state.range(0) > 100) { + state.SkipWithError("Skipping expensive benchmark"); + return; + } + + StatsSinkFlushSpeedTest speed_test(state.range(0)); + speed_test.test(state); +} +BENCHMARK(bmFlushToSinks)->Unit(::benchmark::kMillisecond)->RangeMultiplier(10)->Range(10, 1000000); + +} // namespace Envoy diff --git a/test/server/server_test.cc b/test/server/server_test.cc index 115953df50d49..8e6acff4fe85d 100644 --- a/test/server/server_test.cc +++ b/test/server/server_test.cc @@ -18,6 +18,7 @@ #include "test/common/config/dummy_config.pb.h" #include "test/common/stats/stat_test_utility.h" +#include "test/config/v2_link_hacks.h" #include "test/integration/server.h" #include "test/mocks/server/bootstrap_extension_factory.h" #include "test/mocks/server/fatal_action_factory.h" @@ -678,17 +679,6 @@ TEST_P(ServerInstanceImplTest, ShutdownBeforeWorkersStarted) { server_thread->join(); } -TEST_P(ServerInstanceImplTest, V2ConfigOnly) { - options_.service_cluster_name_ = "some_cluster_name"; - options_.service_node_name_ = "some_node_name"; - try { - initialize("test/server/test_data/server/unparseable_bootstrap.yaml"); - FAIL(); - } catch (const EnvoyException& e) { - EXPECT_THAT(e.what(), HasSubstr("Unable to parse JSON as proto")); - } -} - TEST_P(ServerInstanceImplTest, Stats) { options_.service_cluster_name_ = "some_cluster_name"; options_.service_node_name_ = "some_node_name"; @@ -788,7 +778,6 @@ TEST_P(ServerStatsTest, FlushStats) { TEST_P(ServerInstanceImplTest, FlushStatsOnAdmin) { CustomStatsSinkFactory factory; 
Registry::InjectFactory registered(factory); - options_.bootstrap_version_ = 3; auto server_thread = startTestServer("test/server/test_data/server/stats_sink_manual_flush_bootstrap.yaml", true); EXPECT_TRUE(server_->statsConfig().flushOnAdmin()); @@ -815,7 +804,6 @@ TEST_P(ServerInstanceImplTest, FlushStatsOnAdmin) { TEST_P(ServerInstanceImplTest, ConcurrentFlushes) { CustomStatsSinkFactory factory; Registry::InjectFactory registered(factory); - options_.bootstrap_version_ = 3; bool workers_started = false; absl::Notification workers_started_fired; @@ -924,21 +912,6 @@ TEST_P(ServerInstanceImplTest, ValidationAllowStaticRejectDynamic) { EXPECT_EQ(0, TestUtility::findCounter(stats_store_, "server.dynamic_unknown_fields")->value()); } -// Validate server localInfo() from bootstrap Node. -// Deprecated testing of the envoy.api.v2.core.Node.build_version field -TEST_P(ServerInstanceImplTest, DEPRECATED_FEATURE_TEST(BootstrapNodeDeprecated)) { - initialize("test/server/test_data/server/node_bootstrap.yaml"); - EXPECT_EQ("bootstrap_zone", server_->localInfo().zoneName()); - EXPECT_EQ("bootstrap_cluster", server_->localInfo().clusterName()); - EXPECT_EQ("bootstrap_id", server_->localInfo().nodeName()); - EXPECT_EQ("bootstrap_sub_zone", server_->localInfo().node().locality().sub_zone()); - EXPECT_EQ(VersionInfo::version(), - server_->localInfo().node().hidden_envoy_deprecated_build_version()); - EXPECT_EQ("envoy", server_->localInfo().node().user_agent_name()); - EXPECT_TRUE(server_->localInfo().node().has_user_agent_build_version()); - expectCorrectBuildVersion(server_->localInfo().node().user_agent_build_version()); -} - // Validate server localInfo() from bootstrap Node. 
TEST_P(ServerInstanceImplTest, BootstrapNode) { initialize("test/server/test_data/server/node_bootstrap.yaml"); @@ -976,126 +949,6 @@ TEST_P(ServerInstanceImplTest, UserAgentOverrideFromNode) { EXPECT_EQ(7, server_->localInfo().node().user_agent_build_version().version().patch()); } -// Validate deprecated user agent version field from bootstrap Node. -TEST_P(ServerInstanceImplTest, DEPRECATED_FEATURE_TEST(UserAgentBuildDeprecatedOverrideFromNode)) { - initialize("test/server/test_data/server/node_bootstrap_agent_deprecated_override.yaml"); - EXPECT_EQ("test-ci-user-agent", server_->localInfo().node().user_agent_name()); - EXPECT_EQ("test", server_->localInfo().node().hidden_envoy_deprecated_build_version()); -} - -// Validate that bootstrap with v2 dynamic transport is rejected when --bootstrap-version is not -// set. -TEST_P(ServerInstanceImplTest, - DEPRECATED_FEATURE_TEST(FailToLoadV2TransportWithoutExplicitVersion)) { - EXPECT_THROW_WITH_REGEX(initialize("test/server/test_data/server/dynamic_v2.yaml"), - DeprecatedMajorVersionException, - "V2 .and AUTO. xDS transport protocol versions are deprecated in.*"); -} - -// Validate that bootstrap with v2 ADS transport is rejected when --bootstrap-version is not -// set. -TEST_P(ServerInstanceImplTest, - DEPRECATED_FEATURE_TEST(FailToLoadV2AdsTransportWithoutExplicitVersion)) { - EXPECT_THROW_WITH_REGEX(initialize("test/server/test_data/server/ads_v2.yaml"), - DeprecatedMajorVersionException, - "V2 .and AUTO. xDS transport protocol versions are deprecated in.*"); -} - -// Validate that bootstrap with v2 HDS transport is rejected when --bootstrap-version is not -// set. -TEST_P(ServerInstanceImplTest, - DEPRECATED_FEATURE_TEST(FailToLoadV2HdsTransportWithoutExplicitVersion)) { - // HDS cluster initialization happens through callbacks after runtime initialization. Exceptions - // are caught and will result in server shutdown. 
- EXPECT_LOG_CONTAINS("warn", - "Skipping initialization of HDS cluster: V2 (and AUTO) xDS transport " - "protocol versions are deprecated", - initialize("test/server/test_data/server/hds_v2.yaml")); -} - -// Validate that bootstrap v2 is rejected when --bootstrap-version is not set. -TEST_P(ServerInstanceImplTest, - DEPRECATED_FEATURE_TEST(FailToLoadV2BootstrapWithoutExplicitVersion)) { - EXPECT_THROW_WITH_REGEX( - initialize("test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.pb_text"), - DeprecatedMajorVersionException, - "Support for v2 will be removed from Envoy at the start of Q1 2021."); -} - -// Validate that bootstrap v2 pb_text with deprecated fields loads when --bootstrap-version is set. -TEST_P(ServerInstanceImplTest, - DEPRECATED_FEATURE_TEST(DISABLED_LoadsV2BootstrapWithExplicitVersionFromPbText)) { - options_.bootstrap_version_ = 2; - initialize("test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.pb_text"); - EXPECT_FALSE(server_->localInfo().node().hidden_envoy_deprecated_build_version().empty()); -} - -// Validate that bootstrap v2 pb_text with deprecated fields fails to load when -// --bootstrap-version is not set. -TEST_P(ServerInstanceImplTest, DEPRECATED_FEATURE_TEST(FailToLoadV2BootstrapFromPbText)) { - EXPECT_THROW_WITH_REGEX( - initialize("test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.pb_text"), - EnvoyException, "The v2 xDS major version is deprecated and disabled by default."); -} - -// Validate that bootstrap v2 YAML with deprecated fields loads when --bootstrap-version is set. 
-TEST_P(ServerInstanceImplTest, - DEPRECATED_FEATURE_TEST(DISABLED_LoadsV2BootstrapWithExplicitVersionFromYaml)) { - options_.bootstrap_version_ = 2; - EXPECT_LOG_CONTAINS( - "trace", "Configuration does not parse cleanly as v3", - initialize("test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.yaml")); - EXPECT_FALSE(server_->localInfo().node().hidden_envoy_deprecated_build_version().empty()); -} - -// Validate that bootstrap v2 YAML with deprecated fields fails to load when -// --bootstrap-version is not set. -TEST_P(ServerInstanceImplTest, DEPRECATED_FEATURE_TEST(FailsToLoadV2BootstrapFromYaml)) { - EXPECT_THROW_WITH_REGEX( - initialize("test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.yaml"), - EnvoyException, "The v2 xDS major version is deprecated and disabled by default."); -} - -// Validate that bootstrap v3 pb_text with new fields loads fails if V2 config is specified. -TEST_P(ServerInstanceImplTest, DISABLED_FailToLoadV3ConfigWhenV2SelectedFromPbText) { - options_.bootstrap_version_ = 2; - - EXPECT_THROW_WITH_REGEX( - initialize("test/server/test_data/server/valid_v3_but_invalid_v2_bootstrap.pb_text"), - EnvoyException, "Unable to parse file"); -} - -// Validate that bootstrap v3 YAML with new fields loads fails if V2 config is specified. -TEST_P(ServerInstanceImplTest, DISABLED_FailToLoadV3ConfigWhenV2SelectedFromYaml) { - options_.bootstrap_version_ = 2; - - EXPECT_THROW_WITH_REGEX( - initialize("test/server/test_data/server/valid_v3_but_invalid_v2_bootstrap.yaml"), - EnvoyException, "has unknown fields"); -} - -// Validate that we correctly parse a V2 pb_text file when configured to do so. 
-TEST_P(ServerInstanceImplTest, - DEPRECATED_FEATURE_TEST(DISABLED_LoadsV2ConfigWhenV2SelectedFromPbText)) { - options_.bootstrap_version_ = 2; - - EXPECT_LOG_CONTAINS( - "trace", "Configuration does not parse cleanly as v3", - initialize("test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.pb_text")); - EXPECT_EQ(server_->localInfo().node().id(), "bootstrap_id"); -} - -// Validate that we correctly parse a V2 YAML file when configured to do so. -TEST_P(ServerInstanceImplTest, - DEPRECATED_FEATURE_TEST(DISABLED_LoadsV2ConfigWhenV2SelectedFromYaml)) { - options_.bootstrap_version_ = 2; - - EXPECT_LOG_CONTAINS( - "trace", "Configuration does not parse cleanly as v3", - initialize("test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.yaml")); - EXPECT_EQ(server_->localInfo().node().id(), "bootstrap_id"); -} - // Validate that we correctly parse a V3 pb_text file without explicit version configuration. TEST_P(ServerInstanceImplTest, LoadsV3ConfigFromPbText) { EXPECT_LOG_NOT_CONTAINS( @@ -1112,8 +965,6 @@ TEST_P(ServerInstanceImplTest, LoadsV3ConfigFromYaml) { // Validate that we correctly parse a V3 pb_text file when configured to do so. TEST_P(ServerInstanceImplTest, LoadsV3ConfigWhenV3SelectedFromPbText) { - options_.bootstrap_version_ = 3; - EXPECT_LOG_NOT_CONTAINS( "trace", "Configuration does not parse cleanly as v3", initialize("test/server/test_data/server/valid_v3_but_invalid_v2_bootstrap.pb_text")); @@ -1121,52 +972,11 @@ TEST_P(ServerInstanceImplTest, LoadsV3ConfigWhenV3SelectedFromPbText) { // Validate that we correctly parse a V3 YAML file when configured to do so. TEST_P(ServerInstanceImplTest, LoadsV3ConfigWhenV3SelectedFromYaml) { - options_.bootstrap_version_ = 3; - EXPECT_LOG_NOT_CONTAINS( "trace", "Configuration does not parse cleanly as v3", initialize("test/server/test_data/server/valid_v3_but_invalid_v2_bootstrap.yaml")); } -// Validate that bootstrap v2 pb_text with deprecated fields loads fails if V3 config is specified. 
-TEST_P(ServerInstanceImplTest, FailToLoadV2ConfigWhenV3SelectedFromPbText) { - options_.bootstrap_version_ = 3; - - EXPECT_THROW_WITH_REGEX( - initialize("test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.pb_text"), - EnvoyException, "Unable to parse file"); -} - -// Validate that bootstrap v2 YAML with deprecated fields loads fails if V3 config is specified. -TEST_P(ServerInstanceImplTest, DEPRECATED_FEATURE_TEST(FailToLoadV2ConfigWhenV3SelectedFromYaml)) { - options_.bootstrap_version_ = 3; - - EXPECT_THROW_WITH_REGEX( - initialize("test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.yaml"), - EnvoyException, "has unknown fields"); -} - -// Validate that bootstrap with v2 dynamic transport loads when --bootstrap-version is set. -TEST_P(ServerInstanceImplTest, - DEPRECATED_FEATURE_TEST(DISABLED_LoadsV2TransportWithoutExplicitVersion)) { - options_.bootstrap_version_ = 2; - initialize("test/server/test_data/server/dynamic_v2.yaml"); -} - -// Validate that bootstrap with v2 ADS transport loads when --bootstrap-version is set. -TEST_P(ServerInstanceImplTest, - DEPRECATED_FEATURE_TEST(DISABLED_LoadsV2AdsTransportWithoutExplicitVersion)) { - options_.bootstrap_version_ = 2; - initialize("test/server/test_data/server/ads_v2.yaml"); -} - -// Validate that bootstrap with v2 HDS transport loads when --bootstrap-version is set. -TEST_P(ServerInstanceImplTest, - DEPRECATED_FEATURE_TEST(DISABLED_LoadsV2HdsTransportWithoutExplicitVersion)) { - options_.bootstrap_version_ = 2; - initialize("test/server/test_data/server/hds_v2.yaml"); -} - // Validate that bootstrap pb_text loads. TEST_P(ServerInstanceImplTest, LoadsBootstrapFromPbText) { EXPECT_LOG_NOT_CONTAINS("trace", "Configuration does not parse cleanly as v3", @@ -1174,24 +984,6 @@ TEST_P(ServerInstanceImplTest, LoadsBootstrapFromPbText) { EXPECT_EQ("bootstrap_id", server_->localInfo().node().id()); } -// Validate that we blow up on invalid version number. 
-TEST_P(ServerInstanceImplTest, InvalidBootstrapVersion) { - options_.bootstrap_version_ = 1; - - EXPECT_THROW_WITH_REGEX( - initialize("test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.pb_text"), - EnvoyException, "Unknown bootstrap version 1."); -} - -// Validate that we always reject v2. -TEST_P(ServerInstanceImplTest, InvalidV2Bootstrap) { - options_.bootstrap_version_ = 2; - - EXPECT_THROW_WITH_REGEX( - initialize("test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.pb_text"), - EnvoyException, "v2 bootstrap is deprecated and no longer supported."); -} - TEST_P(ServerInstanceImplTest, LoadsBootstrapFromConfigProtoOptions) { options_.config_proto_.mutable_node()->set_id("foo"); initialize("test/server/test_data/server/node_bootstrap.yaml"); @@ -1266,13 +1058,6 @@ TEST_P(ServerInstanceImplTest, BootstrapRtdsThroughAdsViaEdsFails) { EnvoyException, "Unknown gRPC client cluster"); } -TEST_P(ServerInstanceImplTest, DEPRECATED_FEATURE_TEST(DISABLED_InvalidLegacyBootstrapRuntime)) { - options_.bootstrap_version_ = 2; - EXPECT_THROW_WITH_MESSAGE( - initialize("test/server/test_data/server/invalid_legacy_runtime_bootstrap.yaml"), - EnvoyException, "Invalid runtime entry value for foo"); -} - // Validate invalid runtime in bootstrap is rejected. TEST_P(ServerInstanceImplTest, InvalidBootstrapRuntime) { EXPECT_THROW_WITH_MESSAGE( @@ -1376,7 +1161,7 @@ TEST_P(ServerInstanceImplTest, BootstrapNodeWithSocketOptions) { // Start Envoy instance with admin port with SO_REUSEPORT option. ASSERT_NO_THROW( initialize("test/server/test_data/server/node_bootstrap_with_admin_socket_options.yaml")); - const auto address = server_->admin().socket().addressProvider().localAddress(); + const auto address = server_->admin().socket().connectionInfoProvider().localAddress(); // First attempt to bind and listen socket should fail due to the lack of SO_REUSEPORT socket // options. 
@@ -1525,7 +1310,6 @@ TEST_P(ServerInstanceImplTest, NoHttpTracing) { TEST_P(ServerInstanceImplTest, DEPRECATED_FEATURE_TEST(DISABLED_ZipkinHttpTracingEnabled)) { options_.service_cluster_name_ = "some_cluster_name"; options_.service_node_name_ = "some_node_name"; - options_.bootstrap_version_ = 2; EXPECT_NO_THROW(initialize("test/server/test_data/server/zipkin_tracing_deprecated_config.yaml")); EXPECT_EQ("zipkin", server_->httpContext().defaultTracingConfig().http().name()); } diff --git a/test/server/test_data/server/ads_v2.yaml b/test/server/test_data/server/ads_v2.yaml deleted file mode 100644 index 03e5693b75cbe..0000000000000 --- a/test/server/test_data/server/ads_v2.yaml +++ /dev/null @@ -1,14 +0,0 @@ -node: - id: bootstrap_id - cluster: bootstrap_cluster -static_resources: - clusters: - - name: dummy_cluster - connect_timeout: 1s -dynamic_resources: - ads_config: - api_type: GRPC - transport_api_version: V2 - grpc_services: - envoy_grpc: - cluster_name: "dummy_cluster" diff --git a/test/server/test_data/server/dynamic_v2.yaml b/test/server/test_data/server/dynamic_v2.yaml deleted file mode 100644 index bf924ec1639e9..0000000000000 --- a/test/server/test_data/server/dynamic_v2.yaml +++ /dev/null @@ -1,16 +0,0 @@ -node: - id: bootstrap_id - cluster: bootstrap_cluster -static_resources: - clusters: - - name: dummy_cluster - connect_timeout: 1s -dynamic_resources: - lds_config: - resource_api_version: V3 - api_config_source: - api_type: GRPC - transport_api_version: V2 - grpc_services: - envoy_grpc: - cluster_name: "dummy_cluster" diff --git a/test/server/test_data/server/hds_v2.yaml b/test/server/test_data/server/hds_v2.yaml deleted file mode 100644 index fb87f8239e63d..0000000000000 --- a/test/server/test_data/server/hds_v2.yaml +++ /dev/null @@ -1,13 +0,0 @@ -node: - id: bootstrap_id - cluster: bootstrap_cluster -static_resources: - clusters: - - name: dummy_cluster - connect_timeout: 1s -hds_config: - api_type: GRPC - transport_api_version: V2 - 
grpc_services: - envoy_grpc: - cluster_name: "dummy_cluster" diff --git a/test/server/test_data/server/invalid_legacy_runtime_bootstrap.yaml b/test/server/test_data/server/invalid_legacy_runtime_bootstrap.yaml deleted file mode 100644 index 99c67b7d2d9c0..0000000000000 --- a/test/server/test_data/server/invalid_legacy_runtime_bootstrap.yaml +++ /dev/null @@ -1,4 +0,0 @@ -runtime: - base: - foo: - - bar: baz diff --git a/test/server/test_data/server/node_bootstrap.pb_text b/test/server/test_data/server/node_bootstrap.pb_text index f47df39a83510..7fa148fc6a30d 100644 --- a/test/server/test_data/server/node_bootstrap.pb_text +++ b/test/server/test_data/server/node_bootstrap.pb_text @@ -7,7 +7,6 @@ node { } } admin { - access_log_path: "{{ null_device_path }}" address { socket_address { address: "{{ ntop_ip_loopback_address }}" diff --git a/test/server/test_data/server/node_bootstrap.yaml b/test/server/test_data/server/node_bootstrap.yaml index f662534dfeb47..c47464b5e7d5d 100644 --- a/test/server/test_data/server/node_bootstrap.yaml +++ b/test/server/test_data/server/node_bootstrap.yaml @@ -5,7 +5,6 @@ node: zone: bootstrap_zone sub_zone: bootstrap_sub_zone admin: - access_log_path: "{{ null_device_path }}" address: socket_address: address: "{{ ntop_ip_loopback_address }}" diff --git a/test/server/test_data/server/node_bootstrap_agent_deprecated_override.yaml b/test/server/test_data/server/node_bootstrap_agent_deprecated_override.yaml index 1b466cd4a8876..bcbf7c334e525 100644 --- a/test/server/test_data/server/node_bootstrap_agent_deprecated_override.yaml +++ b/test/server/test_data/server/node_bootstrap_agent_deprecated_override.yaml @@ -4,7 +4,6 @@ node: user_agent_name: test-ci-user-agent user_agent_version: test admin: - access_log_path: "{{ null_device_path }}" address: socket_address: address: "{{ ntop_ip_loopback_address }}" diff --git a/test/server/test_data/server/node_bootstrap_agent_override.yaml 
b/test/server/test_data/server/node_bootstrap_agent_override.yaml index bfd5dae1cf00c..b7e9c189826ff 100644 --- a/test/server/test_data/server/node_bootstrap_agent_override.yaml +++ b/test/server/test_data/server/node_bootstrap_agent_override.yaml @@ -8,7 +8,6 @@ node: minor_number: 8 patch: 7 admin: - access_log_path: "{{ null_device_path }}" address: socket_address: address: "{{ ntop_ip_loopback_address }}" diff --git a/test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.pb_text b/test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.pb_text deleted file mode 100644 index 93ce0b49473b4..0000000000000 --- a/test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.pb_text +++ /dev/null @@ -1,4 +0,0 @@ -node { - id: "bootstrap_id" - build_version: "foo" -} diff --git a/test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.yaml b/test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.yaml deleted file mode 100644 index f8ff1b6a9375b..0000000000000 --- a/test/server/test_data/server/valid_v2_but_invalid_v3_bootstrap.yaml +++ /dev/null @@ -1,3 +0,0 @@ -node: - id: "bootstrap_id" - build_version: "foo" diff --git a/test/server/utility.h b/test/server/utility.h index fc5139fff0a16..bb3b2590ffcba 100644 --- a/test/server/utility.h +++ b/test/server/utility.h @@ -13,10 +13,9 @@ namespace Envoy { namespace Server { namespace { -inline envoy::config::listener::v3::Listener parseListenerFromV3Yaml(const std::string& yaml, - bool avoid_boosting = true) { +inline envoy::config::listener::v3::Listener parseListenerFromV3Yaml(const std::string& yaml) { envoy::config::listener::v3::Listener listener; - TestUtility::loadFromYamlAndValidate(yaml, listener, true, avoid_boosting); + TestUtility::loadFromYamlAndValidate(yaml, listener); return listener; } diff --git a/test/server/worker_impl_test.cc b/test/server/worker_impl_test.cc index e83aa447c6ee4..7fd1fd0631d98 100644 --- a/test/server/worker_impl_test.cc +++ 
b/test/server/worker_impl_test.cc @@ -33,8 +33,9 @@ class WorkerImplTest : public testing::Test { WorkerImplTest() : api_(Api::createApiForTest()), dispatcher_(api_->allocateDispatcher("worker_test")), no_exit_timer_(dispatcher_->createTimer([]() -> void {})), + stat_names_(api_->rootScope().symbolTable()), worker_(tls_, hooks_, std::move(dispatcher_), Network::ConnectionHandlerPtr{handler_}, - overload_manager_, *api_) { + overload_manager_, *api_, stat_names_) { // In the real worker the watchdog has timers that prevent exit. Here we need to prevent event // loop exit since we use mock timers. no_exit_timer_->enableTimer(std::chrono::hours(1)); @@ -55,6 +56,7 @@ class WorkerImplTest : public testing::Test { Event::DispatcherPtr dispatcher_; DefaultListenerHooks hooks_; Event::TimerPtr no_exit_timer_; + WorkerStatNames stat_names_; WorkerImpl worker_; }; diff --git a/test/test_common/BUILD b/test/test_common/BUILD index 75a5445637854..9e4ecbdeee869 100644 --- a/test/test_common/BUILD +++ b/test/test_common/BUILD @@ -124,7 +124,6 @@ envoy_cc_test_library( "//source/common/common:utility_lib", "//source/common/config:decoded_resource_lib", "//source/common/config:opaque_resource_decoder_lib", - "//source/common/config:version_converter_lib", "//source/common/filesystem:directory_lib", "//source/common/filesystem:filesystem_lib", "//source/common/http:header_map_lib", @@ -135,6 +134,7 @@ envoy_cc_test_library( "//source/common/protobuf:utility_lib", "//source/common/stats:stats_lib", "//test/mocks/stats:stats_mocks", + "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", "@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto", "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", diff --git a/test/test_common/network_utility.cc b/test/test_common/network_utility.cc index 6cf11038fbd77..637d238438c79 100644 --- a/test/test_common/network_utility.cc +++ b/test/test_common/network_utility.cc @@ -57,7 +57,7 @@ 
Address::InstanceConstSharedPtr findOrCheckFreePort(Address::InstanceConstShared << ")"; return nullptr; } - return sock.addressProvider().localAddress(); + return sock.connectionInfoProvider().localAddress(); } Address::InstanceConstSharedPtr findOrCheckFreePort(const std::string& addr_port, @@ -179,7 +179,7 @@ bindFreeLoopbackPort(Address::IpVersion version, Socket::Type type, bool reuse_p throw EnvoyException(msg); } - return std::make_pair(sock->addressProvider().localAddress(), std::move(sock)); + return std::make_pair(sock->connectionInfoProvider().localAddress(), std::move(sock)); } TransportSocketPtr createRawBufferSocket() { return std::make_unique(); } @@ -244,7 +244,7 @@ void UdpSyncPeer::write(const std::string& buffer, const Network::Address::Insta void UdpSyncPeer::recv(Network::UdpRecvData& datagram) { if (received_datagrams_.empty()) { const auto rc = Network::Test::readFromSocket(socket_->ioHandle(), - *socket_->addressProvider().localAddress(), + *socket_->connectionInfoProvider().localAddress(), received_datagrams_, max_rx_datagram_size_); ASSERT_TRUE(rc.ok()); } diff --git a/test/test_common/network_utility.h b/test/test_common/network_utility.h index ab60a012672cc..c7a9b7b85b672 100644 --- a/test/test_common/network_utility.h +++ b/test/test_common/network_utility.h @@ -203,7 +203,7 @@ class UdpSyncPeer { // Return the local peer's socket address. 
const Network::Address::InstanceConstSharedPtr& localAddress() { - return socket_->addressProvider().localAddress(); + return socket_->connectionInfoProvider().localAddress(); } private: diff --git a/test/test_common/utility.cc b/test/test_common/utility.cc index 35b5305cd0a9b..c2f68cc000c6d 100644 --- a/test/test_common/utility.cc +++ b/test/test_common/utility.cc @@ -12,6 +12,7 @@ #include "envoy/buffer/buffer.h" #include "envoy/common/platform.h" +#include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/config/endpoint/v3/endpoint.pb.h" #include "envoy/config/listener/v3/listener.pb.h" @@ -175,7 +176,14 @@ AssertionResult TestUtility::waitForCounterEq(Stats::Store& store, const std::st while (findCounter(store, name) == nullptr || findCounter(store, name)->value() != value) { time_system.advanceTimeWait(std::chrono::milliseconds(10)); if (timeout != std::chrono::milliseconds::zero() && !bound.withinBound()) { - return AssertionFailure() << fmt::format("timed out waiting for {} to be {}", name, value); + std::string current_value; + if (findCounter(store, name)) { + current_value = absl::StrCat(findCounter(store, name)->value()); + } else { + current_value = "nil"; + } + return AssertionFailure() << fmt::format( + "timed out waiting for {} to be {}, current value {}", name, value, current_value); } if (dispatcher != nullptr) { dispatcher->run(Event::Dispatcher::RunType::NonBlock); @@ -217,12 +225,27 @@ AssertionResult TestUtility::waitForGaugeEq(Stats::Store& store, const std::stri while (findGauge(store, name) == nullptr || findGauge(store, name)->value() != value) { time_system.advanceTimeWait(std::chrono::milliseconds(10)); if (timeout != std::chrono::milliseconds::zero() && !bound.withinBound()) { - return AssertionFailure() << fmt::format("timed out waiting for {} to be {}", name, value); + std::string current_value; + if (findGauge(store, name)) { + current_value = absl::StrCat(findGauge(store, 
name)->value()); + } else { + current_value = "nil"; + } + return AssertionFailure() << fmt::format( + "timed out waiting for {} to be {}, current value {}", name, value, current_value); } } return AssertionSuccess(); } +AssertionResult TestUtility::waitForGaugeDestroyed(Stats::Store& store, const std::string& name, + Event::TestTimeSystem& time_system) { + while (findGauge(store, name) == nullptr) { + time_system.advanceTimeWait(std::chrono::milliseconds(10)); + } + return AssertionSuccess(); +} + AssertionResult TestUtility::waitUntilHistogramHasSamples(Stats::Store& store, const std::string& name, Event::TestTimeSystem& time_system, @@ -399,6 +422,7 @@ class TestImplProvider { Event::GlobalTimeSystem global_time_system_; testing::NiceMock default_stats_store_; testing::NiceMock mock_random_generator_; + envoy::config::bootstrap::v3::Bootstrap empty_bootstrap_; }; class TestImpl : public TestImplProvider, public Impl { @@ -408,7 +432,7 @@ class TestImpl : public TestImplProvider, public Impl { Random::RandomGenerator* random = nullptr) : Impl(thread_factory, stats_store ? *stats_store : default_stats_store_, time_system ? *time_system : global_time_system_, file_system, - random ? *random : mock_random_generator_) {} + random ? 
*random : mock_random_generator_, empty_bootstrap_) {} }; ApiPtr createApiForTest() { diff --git a/test/test_common/utility.h b/test/test_common/utility.h index 57c2162ce82ec..e7fa952614c92 100644 --- a/test/test_common/utility.h +++ b/test/test_common/utility.h @@ -21,7 +21,6 @@ #include "source/common/common/thread.h" #include "source/common/config/decoded_resource_impl.h" #include "source/common/config/opaque_resource_decoder_impl.h" -#include "source/common/config/version_converter.h" #include "source/common/http/header_map_impl.h" #include "source/common/protobuf/message_validator_impl.h" #include "source/common/protobuf/utility.h" @@ -278,6 +277,17 @@ class TestUtility { Event::TestTimeSystem& time_system, std::chrono::milliseconds timeout = std::chrono::milliseconds::zero()); + /** + * Wait for a gauge to be destroyed. + * @param store supplies the stats store. + * @param name gauge name. + * @param time_system the time system to use for waiting. + * @return AssertionSuccess() if the gauge was == to the value within the timeout, else + * AssertionFailure(). + */ + static AssertionResult waitForGaugeDestroyed(Stats::Store& store, const std::string& name, + Event::TestTimeSystem& time_system); + /** * Wait for a histogram to have samples. * @param store supplies the stats store. 
@@ -601,34 +611,20 @@ class TestUtility { static std::string nonZeroedGauges(const std::vector& gauges); // Strict variants of Protobuf::MessageUtil - static void loadFromJson(const std::string& json, Protobuf::Message& message, - bool preserve_original_type = false, bool avoid_boosting = false) { - MessageUtil::loadFromJson(json, message, ProtobufMessage::getStrictValidationVisitor(), - !avoid_boosting); - if (!preserve_original_type) { - Config::VersionConverter::eraseOriginalTypeInformation(message); - } + static void loadFromJson(const std::string& json, Protobuf::Message& message) { + MessageUtil::loadFromJson(json, message, ProtobufMessage::getStrictValidationVisitor()); } static void loadFromJson(const std::string& json, ProtobufWkt::Struct& message) { MessageUtil::loadFromJson(json, message); } - static void loadFromYaml(const std::string& yaml, Protobuf::Message& message, - bool preserve_original_type = false, bool avoid_boosting = false) { - MessageUtil::loadFromYaml(yaml, message, ProtobufMessage::getStrictValidationVisitor(), - !avoid_boosting); - if (!preserve_original_type) { - Config::VersionConverter::eraseOriginalTypeInformation(message); - } + static void loadFromYaml(const std::string& yaml, Protobuf::Message& message) { + MessageUtil::loadFromYaml(yaml, message, ProtobufMessage::getStrictValidationVisitor()); } - static void loadFromFile(const std::string& path, Protobuf::Message& message, Api::Api& api, - bool preserve_original_type = false) { + static void loadFromFile(const std::string& path, Protobuf::Message& message, Api::Api& api) { MessageUtil::loadFromFile(path, message, ProtobufMessage::getStrictValidationVisitor(), api); - if (!preserve_original_type) { - Config::VersionConverter::eraseOriginalTypeInformation(message); - } } template @@ -637,14 +633,9 @@ class TestUtility { } template - static void loadFromYamlAndValidate(const std::string& yaml, MessageType& message, - bool preserve_original_type = false, - bool avoid_boosting = 
false) { - MessageUtil::loadFromYamlAndValidate( - yaml, message, ProtobufMessage::getStrictValidationVisitor(), avoid_boosting); - if (!preserve_original_type) { - Config::VersionConverter::eraseOriginalTypeInformation(message); - } + static void loadFromYamlAndValidate(const std::string& yaml, MessageType& message) { + MessageUtil::loadFromYamlAndValidate(yaml, message, + ProtobufMessage::getStrictValidationVisitor()); } template static void validate(const MessageType& message) { @@ -776,50 +767,6 @@ class TestUtility { NOT_REACHED_GCOVR_EXCL_LINE; } } - - /** - * Returns the fully-qualified name of a service, rendered from service_full_name_template. - * - * @param service_full_name_template the service fully-qualified name template. - * @param api_version version of a service. - * @param service_namespace to override the service namespace. - * @return std::string full path of a service method. - */ - static std::string - getVersionedServiceFullName(const std::string& service_full_name_template, - envoy::config::core::v3::ApiVersion api_version, - const std::string& service_namespace = EMPTY_STRING) { - switch (api_version) { - case envoy::config::core::v3::ApiVersion::AUTO: - FALLTHRU; - case envoy::config::core::v3::ApiVersion::V2: - return fmt::format(service_full_name_template, "v2", service_namespace); - - case envoy::config::core::v3::ApiVersion::V3: - return fmt::format(service_full_name_template, "v3", service_namespace); - default: - NOT_REACHED_GCOVR_EXCL_LINE; - } - } - - /** - * Returns the full path of a service method. - * - * @param service_full_name_template the service fully-qualified name template. - * @param method_name the method name. - * @param api_version version of a service method. - * @param service_namespace to override the service namespace. - * @return std::string full path of a service method. 
- */ - static std::string getVersionedMethodPath(const std::string& service_full_name_template, - absl::string_view method_name, - envoy::config::core::v3::ApiVersion api_version, - const std::string& service_namespace = EMPTY_STRING) { - return absl::StrCat( - "/", - getVersionedServiceFullName(service_full_name_template, api_version, service_namespace), - "/", method_name); - } }; /** @@ -870,19 +817,36 @@ class TestTraceContextImpl : public Tracing::TraceContext { context_map_[value.first] = value.second; } } - - absl::optional getTraceContext(absl::string_view key) const override { + absl::string_view protocol() const override { return context_protocol_; } + absl::string_view authority() const override { return context_authority_; } + absl::string_view path() const override { return context_path_; } + absl::string_view method() const override { return context_method_; } + void forEach(IterateCallback callback) const override { + for (const auto& pair : context_map_) { + if (!callback(pair.first, pair.second)) { + break; + } + } + } + absl::optional getByKey(absl::string_view key) const override { auto iter = context_map_.find(key); if (iter == context_map_.end()) { return absl::nullopt; } return iter->second; } - - void setTraceContext(absl::string_view key, absl::string_view val) override { + void setByKey(absl::string_view key, absl::string_view val) override { context_map_.insert({std::string(key), std::string(val)}); } + void setByReferenceKey(absl::string_view key, absl::string_view val) override { + setByKey(key, val); + } + void setByReference(absl::string_view key, absl::string_view val) override { setByKey(key, val); } + std::string context_protocol_; + std::string context_authority_; + std::string context_path_; + std::string context_method_; absl::flat_hash_map context_map_; }; @@ -1102,23 +1066,35 @@ class TestRequestHeaderMapImpl INLINE_REQ_RESP_STRING_HEADERS(DEFINE_TEST_INLINE_STRING_HEADER_FUNCS) 
INLINE_REQ_RESP_NUMERIC_HEADERS(DEFINE_TEST_INLINE_NUMERIC_HEADER_FUNCS) - absl::optional getTraceContext(absl::string_view key) const override { + // Tracing::TraceContext + absl::string_view protocol() const override { return header_map_->getProtocolValue(); } + absl::string_view authority() const override { return header_map_->getHostValue(); } + absl::string_view path() const override { return header_map_->getPathValue(); } + absl::string_view method() const override { return header_map_->getMethodValue(); } + void forEach(IterateCallback callback) const override { ASSERT(header_map_); - return header_map_->getTraceContext(key); + header_map_->iterate([cb = std::move(callback)](const HeaderEntry& entry) { + if (cb(entry.key().getStringView(), entry.value().getStringView())) { + return HeaderMap::Iterate::Continue; + } + return HeaderMap::Iterate::Break; + }); } - void setTraceContext(absl::string_view key, absl::string_view value) override { + absl::optional getByKey(absl::string_view key) const override { ASSERT(header_map_); - header_map_->setTraceContext(key, value); + return header_map_->getByKey(key); } - - void setTraceContextReferenceKey(absl::string_view key, absl::string_view val) override { + void setByKey(absl::string_view key, absl::string_view value) override { ASSERT(header_map_); - header_map_->setTraceContextReferenceKey(key, val); + header_map_->setByKey(key, value); } - - void setTraceContextReference(absl::string_view key, absl::string_view val) override { + void setByReference(absl::string_view key, absl::string_view val) override { + ASSERT(header_map_); + header_map_->setByReference(key, val); + } + void setByReferenceKey(absl::string_view key, absl::string_view val) override { ASSERT(header_map_); - header_map_->setTraceContextReference(key, val); + header_map_->setByReferenceKey(key, val); } }; diff --git a/test/tools/router_check/router.cc b/test/tools/router_check/router.cc index 4d21e67c5a752..9a6535515fcab 100644 --- 
a/test/tools/router_check/router.cc +++ b/test/tools/router_check/router.cc @@ -247,11 +247,11 @@ bool RouterCheckTool::compareEntries(const std::string& expected_routes) { validation_config.tests()) { active_runtime_ = check_config.input().runtime(); headers_finalized_ = false; - auto address_provider = std::make_shared( + auto connection_info_provider = std::make_shared( nullptr, Network::Utility::getCanonicalIpv4LoopbackAddress()); Envoy::StreamInfo::StreamInfoImpl stream_info(Envoy::Http::Protocol::Http11, factory_context_->dispatcher().timeSource(), - address_provider); + connection_info_provider); ToolConfig tool_config = ToolConfig::create(check_config); tool_config.route_ = config_->route(*tool_config.request_headers_, stream_info, tool_config.random_value_); diff --git a/tools/api/generate_go_protobuf.py b/tools/api/generate_go_protobuf.py index 833e0b87322cc..6444bef2df8ed 100755 --- a/tools/api/generate_go_protobuf.py +++ b/tools/api/generate_go_protobuf.py @@ -98,13 +98,14 @@ def write_revision_info(repo, sha): def sync_go_protobufs(output, repo): - # Sync generated content against repo and return true if there is a commit necessary - dst = os.path.join(repo, 'envoy') - # Remove subtree at envoy in repo - git(repo, 'rm', '-r', 'envoy') - # Copy subtree at envoy from output to repo - shutil.copytree(os.path.join(output, 'envoy'), dst) - git(repo, 'add', 'envoy') + for folder in ['envoy', 'contrib']: + # Sync generated content against repo and return true if there is a commit necessary + dst = os.path.join(repo, folder) + # Remove subtree in repo + git(repo, 'rm', '-r', '--ignore-unmatch', folder) + # Copy subtree from output to repo + shutil.copytree(os.path.join(output, folder), dst) + git(repo, 'add', folder) def publish_go_protobufs(repo, sha): @@ -112,6 +113,7 @@ def publish_go_protobufs(repo, sha): git(repo, 'config', 'user.name', USER_NAME) git(repo, 'config', 'user.email', USER_EMAIL) git(repo, 'add', 'envoy') + git(repo, 'add', 'contrib') 
git(repo, 'commit', '--allow-empty', '-s', '-m', MIRROR_MSG + sha) git(repo, 'push', 'origin', BRANCH) diff --git a/tools/api_proto_breaking_change_detector/BUILD b/tools/api_proto_breaking_change_detector/BUILD new file mode 100644 index 0000000000000..bce76c323d361 --- /dev/null +++ b/tools/api_proto_breaking_change_detector/BUILD @@ -0,0 +1,56 @@ +load("@rules_python//python:defs.bzl", "py_binary", "py_library", "py_test") + +licenses(["notice"]) # Apache 2 + +py_binary( + name = "detector", + srcs = [ + "detector.py", + ], + data = [ + "@com_github_bufbuild_buf//:buf", + "@envoy_api_canonical//:proto_breaking_change_detector_buf_config", + ], + main = "detector.py", + tags = ["manual"], + deps = [ + ":buf_utils", + ":detector_errors", + "//tools:run_command", + ], +) + +py_library( + name = "buf_utils", + srcs = [ + "buf_utils.py", + ], + deps = [ + ":detector_errors", + "//tools/base:utils", + ], +) + +py_library( + name = "detector_errors", + srcs = [ + "detector_errors.py", + ], +) + +py_test( + name = "detector_test", + srcs = ["detector_test.py"], + data = [ + "//tools/testdata/api_proto_breaking_change_detector:proto_breaking_change_detector_testdata", + ], + main = "detector_test.py", + python_version = "PY3", + srcs_version = "PY3", + tags = ["manual"], + deps = [ + ":detector", + "//tools:run_command", + "@rules_python//python/runfiles", + ], +) diff --git a/tools/api_proto_breaking_change_detector/buf_utils.py b/tools/api_proto_breaking_change_detector/buf_utils.py new file mode 100644 index 0000000000000..1c279e84a49c4 --- /dev/null +++ b/tools/api_proto_breaking_change_detector/buf_utils.py @@ -0,0 +1,105 @@ +from pathlib import Path +from typing import List, Union, Tuple + +from detector_errors import ChangeDetectorError, ChangeDetectorInitializeError +from tools.base.utils import cd_and_return +from tools.run_command import run_command + + +def _generate_buf_args(target_path, config_file_loc, additional_args): + buf_args = [] + + # buf requires 
relative pathing for roots + target_relative = Path(target_path).absolute().relative_to(Path.cwd().absolute()) + + # buf does not accept . as a root; if we are already in the target dir, no need for a --path arg + if str(target_relative) != ".": + buf_args.extend(["--path", str(target_relative)]) + + if config_file_loc: + buf_args.extend(["--config", str(config_file_loc)]) + + buf_args.extend(additional_args or []) + + return buf_args + + +def _cd_into_config_parent(config_file_loc): + config_parent = Path(config_file_loc).parent if config_file_loc else Path.cwd() + return cd_and_return(config_parent) + + +def pull_buf_deps( + buf_path: Union[str, Path], + target_path: Union[str, Path], + config_file_loc: Union[str, Path] = None, + additional_args: List[str] = None) -> None: + """Updates buf.lock file and downloads any BSR dependencies specified in buf.yaml + + Note that in order for dependency downloading to trigger, `buf build` + must be invoked, so `target_path` must contain valid proto syntax. + + Args: + buf_path {Union[str, Path]} -- path to buf binary to use + target_path {Union[str, Path]} -- path to directory containing protos to run buf on + + config_file_loc {Union[str, Path]} -- absolute path to buf.yaml configuration file (if not provided, uses default buf configuration) + additional_args {List[str]} -- additional arguments passed into the buf binary invocations + + Raises: + ChangeDetectorInitializeError: if buf encounters an error while attempting to update the buf.lock file or build afterward + """ + with _cd_into_config_parent(config_file_loc): + buf_args = _generate_buf_args(target_path, config_file_loc, additional_args) + + update_code, _, update_err = run_command(f'{buf_path} mod update') + # for some reason buf prints out the "downloading..." 
lines on stderr + if update_code != 0: + raise ChangeDetectorInitializeError( + f"Error running `buf mod update`: exit status code {update_code} | stderr: {''.join(update_err)}" + ) + if not Path.cwd().joinpath("buf.lock").exists(): + raise ChangeDetectorInitializeError( + "buf mod update did not generate a buf.lock file (silent error... incorrect config?)" + ) + + run_command(' '.join([f'{buf_path} build', *buf_args])) + + +def check_breaking( + buf_path: Union[str, Path], + target_path: Union[str, Path], + git_ref: str, + git_path: Union[str, Path], + config_file_loc: Union[str, Path] = None, + additional_args: List[str] = None, + subdir: str = None) -> Tuple[int, List[str], List[str]]: + """Runs `buf breaking` to check for breaking changes between the `target_path` protos and the provided initial state + + Args: + buf_path {Union[str, Path]} -- path to buf binary to use + target_path {Union[str, Path]} -- path to directory containing protos to check for breaking changes + git_ref {str} -- git reference to use for the initial state of the protos (typically a commit hash) + git_path {Union[str, Path]} -- absolute path to .git folder for the repository of interest + + subdir {str} -- subdirectory within git repository from which to search for .proto files (default: None, e.g. stay in root) + config_file_loc {Union[str, Path]} -- absolute path to buf.yaml configuration file (if not provided, uses default buf configuration) + additional_args {List[str]} -- additional arguments passed into the buf binary invocations + + Returns: + Tuple[int, List[str], List[str]] -- tuple of (exit status code, stdout, stderr) as provided by run_command. 
Note stdout/stderr are provided as string lists + """ + with _cd_into_config_parent(config_file_loc): + if not Path(git_path).exists(): + raise ChangeDetectorError(f'path to .git folder {git_path} does not exist') + + buf_args = _generate_buf_args(target_path, config_file_loc, additional_args) + + initial_state_input = f'{git_path}#ref={git_ref}' + + if subdir: + initial_state_input += f',subdir={subdir}' + + final_code, final_out, final_err = run_command( + ' '.join([buf_path, f"breaking --against {initial_state_input}", *buf_args])) + return final_code, final_out, final_err diff --git a/tools/api_proto_breaking_change_detector/detector.py b/tools/api_proto_breaking_change_detector/detector.py new file mode 100644 index 0000000000000..c5a66be94214c --- /dev/null +++ b/tools/api_proto_breaking_change_detector/detector.py @@ -0,0 +1,133 @@ +""" Protocol Buffer Breaking Change Detector + +This tool is used to detect "breaking changes" in protobuf files, to +ensure proper backwards-compatibility in protobuf API updates. The tool +can check for breaking changes of a single API by taking 2 .proto file +paths as input (before and after) and outputting a bool `is_breaking`. + +The breaking change detector creates a temporary directory, copies in +each file to compute a protobuf "state", computes a diff of the "before" +and "after" states, and runs the diff against a set of rules to determine +if there was a breaking change. 
+ +The tool is currently implemented with buf (https://buf.build/) +""" + +from pathlib import Path +from typing import List + +from buf_utils import check_breaking, pull_buf_deps +from detector_errors import ChangeDetectorError + + +class ProtoBreakingChangeDetector(object): + """Abstract breaking change detector interface""" + + def run_detector(self) -> None: + """Run the breaking change detector to detect rule violations + + This method should populate the detector's internal data such + that `is_breaking` does not require any additional invocations + to the breaking change detector. + """ + pass + + def is_breaking(self) -> bool: + """Return True if breaking changes were detected in the given protos""" + pass + + def get_breaking_changes(self) -> List[str]: + """Return a list of strings containing breaking changes output by the tool""" + pass + + +class BufWrapper(ProtoBreakingChangeDetector): + """Breaking change detector implemented with buf""" + + def __init__( + self, + path_to_changed_dir: str, + git_ref: str, + git_path: str, + subdir: str = None, + buf_path: str = None, + config_file_loc: str = None, + additional_args: List[str] = None) -> None: + """Initialize the configuration of buf + + This function sets up any necessary config without actually + running buf against any proto files. + + BufWrapper takes a path to a directory containing proto files + as input, and it checks if these proto files break any changes + from a given initial state. + + The initial state is input as a git ref. The constructor expects + a git ref string, as well as an absolute path to a .git folder + for the repository. 
+ + Args: + path_to_changed_dir {str} -- absolute path to a directory containing proto files in the after state + buf_path {str} -- path to the buf binary (default: "buf") + git_ref {str} -- git reference to use for the initial state of the protos (typically a commit hash) + git_path {str} -- absolute path to .git folder for the repository of interest + + subdir {str} -- subdirectory within git repository from which to search for .proto files (default: None, e.g. stay in root) + additional_args {List[str]} -- additional arguments passed into the buf binary invocations + config_file_loc {str} -- absolute path to buf.yaml configuration file (if not provided, uses default buf configuration) + """ + if not Path(path_to_changed_dir).is_dir(): + raise ValueError(f"path_to_changed_dir {path_to_changed_dir} is not a valid directory") + + if Path.cwd() not in Path(path_to_changed_dir).parents: + raise ValueError( + f"path_to_changed_dir {path_to_changed_dir} must be a subdirectory of the cwd ({ Path.cwd() })" + ) + + if not Path(git_path).exists(): + raise ChangeDetectorError(f'path to .git folder {git_path} does not exist') + + self._path_to_changed_dir = path_to_changed_dir + self._additional_args = additional_args + self._buf_path = buf_path or "buf" + self._config_file_loc = config_file_loc + self._git_ref = git_ref + self._git_path = git_path + self._subdir = subdir + self._final_result = None + + pull_buf_deps( + self._buf_path, + self._path_to_changed_dir, + config_file_loc=self._config_file_loc, + additional_args=self._additional_args) + + def run_detector(self) -> None: + self._final_result = check_breaking( + self._buf_path, + self._path_to_changed_dir, + git_ref=self._git_ref, + git_path=self._git_path, + subdir=self._subdir, + config_file_loc=self._config_file_loc, + additional_args=self._additional_args) + + def is_breaking(self) -> bool: + if not self._final_result: + raise ChangeDetectorError("Must invoke run_detector() before checking if is_breaking()") + + 
final_code, final_out, final_err = self._final_result + final_out, final_err = '\n'.join(final_out), '\n'.join(final_err) + + if final_err != "": + raise ChangeDetectorError(f"Error from buf: {final_err}") + + if final_code != 0: + return True + if final_out != "": + return True + return False + + def get_breaking_changes(self) -> List[str]: + _, final_out, _ = self._final_result + return filter(lambda x: len(x) > 0, final_out) if self.is_breaking() else [] diff --git a/tools/api_proto_breaking_change_detector/detector_ci.py b/tools/api_proto_breaking_change_detector/detector_ci.py new file mode 100755 index 0000000000000..84278d6d4b075 --- /dev/null +++ b/tools/api_proto_breaking_change_detector/detector_ci.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python3 + +import argparse +import sys +from pathlib import Path + +from detector import BufWrapper + +API_DIR = Path("api").resolve() +GIT_PATH = Path.cwd().joinpath(".git") +CONFIG_FILE_LOC = Path(API_DIR, "buf.yaml") + + +def detect_breaking_changes_git(path_to_buf, ref): + """Returns True if breaking changes were detected in the api folder""" + detector = BufWrapper( + API_DIR, + buf_path=path_to_buf, + config_file_loc=CONFIG_FILE_LOC, + git_ref=ref, + git_path=GIT_PATH, + subdir="api") + detector.run_detector() + breaking = detector.is_breaking() + + if breaking: + print('Breaking changes detected in api protobufs:') + for i, breaking_change in enumerate(detector.get_breaking_changes()): + print(f'\t{i}: {breaking_change}') + print("ERROR: non-backwards-compatible changes detected in api protobufs.") + return breaking + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description= + 'Tool to detect breaking changes in api protobufs and enforce backwards compatibility.') + parser.add_argument('buf_path', type=str, help='path to buf binary') + parser.add_argument( + 'git_ref', type=str, help='git reference to check against for breaking changes') + args = parser.parse_args() + + exit_status = 
detect_breaking_changes_git(args.buf_path, args.git_ref) + sys.exit(exit_status) diff --git a/tools/api_proto_breaking_change_detector/detector_ci.sh b/tools/api_proto_breaking_change_detector/detector_ci.sh new file mode 100755 index 0000000000000..7f0ec3271e1e5 --- /dev/null +++ b/tools/api_proto_breaking_change_detector/detector_ci.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +tools="$(dirname "$(dirname "$(realpath "$0")")")" +root=$(realpath "$tools/..") + +cd "$root" || exit 1 +# to satisfy dependency on run_command (as done in tools/code_format/check_format_test_helper.sh) +export PYTHONPATH="$root" +./tools/api_proto_breaking_change_detector/detector_ci.py "$@" diff --git a/tools/api_proto_breaking_change_detector/detector_errors.py b/tools/api_proto_breaking_change_detector/detector_errors.py new file mode 100644 index 0000000000000..724bcfd52fc7a --- /dev/null +++ b/tools/api_proto_breaking_change_detector/detector_errors.py @@ -0,0 +1,6 @@ +class ChangeDetectorError(Exception): + pass + + +class ChangeDetectorInitializeError(ChangeDetectorError): + pass diff --git a/tools/api_proto_breaking_change_detector/detector_test.py b/tools/api_proto_breaking_change_detector/detector_test.py new file mode 100644 index 0000000000000..1566bf2d500d8 --- /dev/null +++ b/tools/api_proto_breaking_change_detector/detector_test.py @@ -0,0 +1,175 @@ +""" Proto Breaking Change Detector Test Suite + +This script evaluates breaking change detectors (e.g. buf) against +different protobuf file changes to ensure proper and consistent behavior +in `allowed`and `breaking` circumstances. Although the dependency likely +already tests for these circumstances, these specify Envoy's requirements +and ensure that tool behavior is consistent across dependency updates. 
+""" + +import tempfile +import unittest +from pathlib import Path +from shutil import copyfile, copytree + +from rules_python.python.runfiles import runfiles + +from buf_utils import pull_buf_deps +from detector import BufWrapper +from tools.base.utils import cd_and_return +from tools.run_command import run_command + + +class BreakingChangeDetectorTests(object): + + def run_detector_test(self, testname, is_breaking, expects_changes, additional_args=None): + """Runs a test case for an arbitrary breaking change detector type""" + pass + + +class TestBreakingChanges(BreakingChangeDetectorTests): + + def run_breaking_test(self, testname): + self.run_detector_test(testname, is_breaking=True, expects_changes=False) + + def test_change_field_id(self): + self.run_breaking_test(self.test_change_field_id.__name__) + + def test_change_field_type(self): + self.run_breaking_test(self.test_change_field_type.__name__) + + def test_change_field_plurality(self): + self.run_breaking_test(self.test_change_field_plurality.__name__) + + def test_change_field_name(self): + self.run_breaking_test(self.test_change_field_name.__name__) + + def test_change_package_name(self): + self.run_breaking_test(self.test_change_package_name.__name__) + + def test_change_field_from_oneof(self): + self.run_breaking_test(self.test_change_field_from_oneof.__name__) + + def test_change_field_to_oneof(self): + self.run_breaking_test(self.test_change_field_to_oneof.__name__) + + def test_change_pgv_field(self): + self.run_breaking_test(self.test_change_pgv_field.__name__) + + def test_change_pgv_message(self): + self.run_breaking_test(self.test_change_pgv_message.__name__) + + def test_change_pgv_oneof(self): + self.run_breaking_test(self.test_change_pgv_oneof.__name__) + + +class TestAllowedChanges(BreakingChangeDetectorTests): + + def run_allowed_test(self, testname): + self.run_detector_test(testname, is_breaking=False, expects_changes=True) + + def test_add_comment(self): + 
self.run_allowed_test(self.test_add_comment.__name__) + + def test_add_field(self): + self.run_allowed_test(self.test_add_field.__name__) + + def test_add_option(self): + self.run_allowed_test(self.test_add_option.__name__) + + def test_add_enum_value(self): + self.run_allowed_test(self.test_add_enum_value.__name__) + + def test_remove_and_reserve_field(self): + self.run_allowed_test(self.test_remove_and_reserve_field.__name__) + + +class BufTests(TestAllowedChanges, TestBreakingChanges, unittest.TestCase): + _buf_path = runfiles.Create().Rlocation("com_github_bufbuild_buf/bin/buf") + + @classmethod + def _run_command_print_error(cls, cmd): + code, out, err = run_command(cmd) + out, err = '\n'.join(out), '\n'.join(err) + if code != 0: + raise Exception( + f"Error running command {cmd}\nExit code: {code} | stdout: {out} | stderr: {err}") + + @classmethod + def setUpClass(cls): + try: + # make temp dir + # buf requires protobuf files to be in a subdirectory of the directory containing the yaml file + cls._temp_dir = tempfile.TemporaryDirectory(dir=Path.cwd()) + cls._config_file_loc = Path(cls._temp_dir.name, "buf.yaml") + + # copy in test data + testdata_path = Path( + Path.cwd(), "tools", "testdata", "api_proto_breaking_change_detector") + copytree(testdata_path, cls._temp_dir.name, dirs_exist_ok=True) + + # copy in buf config + bazel_buf_config_loc = Path.cwd().joinpath( + "external", "envoy_api_canonical", "buf.yaml") + copyfile(bazel_buf_config_loc, cls._config_file_loc) + + # pull buf dependencies and initialize git repo with test data files + with cd_and_return(cls._temp_dir.name): + pull_buf_deps( + cls._buf_path, cls._temp_dir.name, config_file_loc=cls._config_file_loc) + cls._run_command_print_error('git init') + cls._run_command_print_error('git add .') + cls._run_command_print_error("git config user.name 'Bazel Test'") + cls._run_command_print_error("git config user.email '<>'") + cls._run_command_print_error('git commit -m "Initial commit"') + except: + 
cls.tearDownClass() + raise + + @classmethod + def tearDownClass(cls): + cls._temp_dir.cleanup() + + def tearDown(self): + # undo changes to proto file that were applied in test + with cd_and_return(self._temp_dir.name): + self._run_command_print_error('git reset --hard') + + def run_detector_test(self, testname, is_breaking, expects_changes, additional_args=None): + """Runs a test case for an arbitrary breaking change detector type""" + tests_path = Path(self._temp_dir.name, "breaking" if is_breaking else "allowed") + + target = Path(tests_path, f"{testname}.proto") + changed = Path(tests_path, f"{testname}_changed") + + # make changes to proto file + copyfile(changed, target) + + # buf breaking + detector_obj = BufWrapper( + self._temp_dir.name, + git_ref="HEAD", + git_path=Path(self._temp_dir.name, ".git"), + additional_args=additional_args, + buf_path=self._buf_path, + config_file_loc=self._config_file_loc) + detector_obj.run_detector() + + breaking_response = detector_obj.is_breaking() + self.assertEqual(breaking_response, is_breaking) + + @unittest.skip("PGV field support not yet added to buf") + def test_change_pgv_field(self): + pass + + @unittest.skip("PGV message option support not yet added to buf") + def test_change_pgv_message(self): + pass + + @unittest.skip("PGV oneof option support not yet added to buf") + def test_change_pgv_oneof(self): + pass + + +if __name__ == '__main__': + unittest.main() diff --git a/tools/base/BUILD b/tools/base/BUILD index f768786837847..8de9977da9dcd 100644 --- a/tools/base/BUILD +++ b/tools/base/BUILD @@ -6,6 +6,24 @@ licenses(["notice"]) # Apache 2 envoy_package() +exports_files([ + "base_command.py", +]) + +envoy_py_library( + "tools.base.aio", + deps = [ + requirement("aio.functional"), + ], +) + +envoy_py_library( + "tools.base.checker", + deps = [ + ":runner", + ], +) + envoy_py_library( "tools.base.runner", deps = [ @@ -20,12 +38,6 @@ envoy_py_library( "tools.base.utils", deps = [ requirement("pyyaml"), - ], -) - 
-envoy_py_library( - "tools.base.checker", - deps = [ - ":runner", + requirement("setuptools"), ], ) diff --git a/tools/base/aio.py b/tools/base/aio.py new file mode 100644 index 0000000000000..a07787c31133f --- /dev/null +++ b/tools/base/aio.py @@ -0,0 +1,509 @@ +import asyncio +import inspect +import os +import subprocess +import types +from concurrent.futures import Executor, ProcessPoolExecutor +from functools import cached_property, partial +from typing import ( + Any, AsyncGenerator, AsyncIterable, AsyncIterator, Awaitable, Iterable, Iterator, List, + Optional, Union) + +from aio.functional import async_property + + +class ConcurrentError(Exception): + """Raised when given inputs/awaitables are incorrect""" + pass + + +class ConcurrentIteratorError(ConcurrentError): + """Raised when iteration of provided awaitables fails""" + pass + + +class ConcurrentExecutionError(ConcurrentError): + """Raised when execution of a provided awaitable fails""" + pass + + +class async_subprocess: # noqa: N801 + + @classmethod + async def parallel( + cls, commands: Iterable[Iterable[str]], + **kwargs) -> AsyncGenerator[subprocess.CompletedProcess, Iterable[Iterable[str]]]: + """Run external subprocesses in parallel + + Yields `subprocess.CompletedProcess` results as they are completed. + + Example usage: + + ``` + import asyncio + + from tools.base.aio import async_subprocess + + async def run_system_commands(commands): + async for result in async_subprocess.parallel(commands, capture_output=True): + print(result.returncode) + print(result.stdout) + print(result.stderr) + + asyncio.run(run_system_commands(["whoami"] for i in range(0, 5))) + ``` + """ + # Using a `ProcessPoolExecutor` or `ThreadPoolExecutor` here is somewhat + # arbitrary as subproc will spawn a new process regardless. + # Either way - using a custom executor of either type gives considerable speedup, + # most likely due to the number of workers allocated. 
+ # In my testing, `ProcessPoolExecutor` gave a very small speedup over a large + # number of tasks, despite any additional overhead of creating the executor. + # Without `max_workers` set `ProcessPoolExecutor` defaults to the number of cpus + # on the machine. + with ProcessPoolExecutor() as pool: + futures = asyncio.as_completed( + tuple( + asyncio.ensure_future(cls.run(command, executor=pool, **kwargs)) + for command in commands)) + for result in futures: + yield await result + + @classmethod + async def run( + cls, + *args, + loop: Optional[asyncio.AbstractEventLoop] = None, + executor: Optional[Executor] = None, + **kwargs) -> subprocess.CompletedProcess: + """This is an asyncio wrapper for `subprocess.run` + + It can be used in a similar way to `subprocess.run` but its non-blocking to + the main thread. + + Example usage: + + ``` + import asyncio + + from tools.base.aio import async_subprocess + + async def run_system_command(): + result = await async_subprocess.run(["whoami"], capture_output=True) + print(result.returncode) + print(result.stdout) + print(result.stderr) + + asyncio.run(run_system_command()) + + ``` + + By default it will spawn the process using the main event loop, and that loop's + default (`ThreadPool`) executor. + + You can provide the loop and/or the executor to change this behaviour. + """ + loop = loop or asyncio.get_running_loop() + return await loop.run_in_executor(executor, partial(subprocess.run, *args, **kwargs)) + + +_sentinel = object() + + +class concurrent: # noqa: N801 + """This utility provides very similar functionality to + `asyncio.as_completed` in that it runs coroutines in concurrent, yielding the + results as they are available. + + There are a couple of differences: + + - `coros` can be any `iterables` including sync/async `generators` + - `limit` can be supplied to specify the maximum number of concurrent tasks + + Setting `limit` to `-1` will make all tasks run in concurrent. 
+ + The default is `number of cores + 4` to a maximum of `32`. + + For network tasks it might make sense to set the concurrency `limit` lower + than the default, if, for example, opening many concurrent connections will trigger + rate-limiting or soak bandwidth. + + If an error is raised while trying to iterate the provided coroutines, the + error is wrapped in an `ConcurrentIteratorError` and is raised immediately. + + In this case, no further handling occurs, and `yield_exceptions` has no + effect. + + Any errors raised while trying to create or run tasks are wrapped in + `ConcurrentError`. + + Any errors raised during task execution are wrapped in + `ConcurrentExecutionError`. + + If you specify `yield_exceptions` as `True` then the wrapped errors will be + yielded in the results. + + If `yield_exceptions` is False (the default), then the wrapped error will be + raised immediately. + + If you use any kind of `Generator` or `AsyncGenerator` to produce the + awaitables, and `yield_exceptions` is `False`, in the event that an error + occurs, it is your responsibility to `close` remaining awaitables that you + might have created but which have not already been fired. + + This utility is mostly useful for concurrentizing io-bound (as opposed to + cpu-bound) tasks. 
+ + Example usage: + + ``` + import random + + from tools.base import aio + + async def task_to_run(i): + print(f"{i} starting") + wait = random.random() * 10 + await asyncio.sleep(wait) + return i, wait + + async def run(coros): + async for (i, wait) in aio.concurrent(coros, limit=3): + print(f"{i} waited {wait}") + + def provider(): + for i in range(0, 10): + yield task_to_run(i) + + asyncio.run(run(provider())) + ``` + """ + + def __init__( + self, + coros: Union[types.AsyncGeneratorType, AsyncIterable[Awaitable], + AsyncIterator[Awaitable], types.GeneratorType, Iterator[Awaitable], + Iterable[Awaitable]], + yield_exceptions: Optional[bool] = False, + limit: Optional[int] = None): + self._coros = coros + self._limit = limit + self._running: List[asyncio.Task] = [] + self.yield_exceptions = yield_exceptions + + def __aiter__(self) -> AsyncIterator: + """Start a coroutine task to process the submit queue, and return + an async generator to deliver results back as they arrive + """ + self.submit_task = asyncio.create_task(self.submit()) + return self.output() + + @property + def active(self) -> bool: + """Checks whether the iterator is active, either because it + hasn't finished submitting or because there are still tasks running + """ + return self.submitting or self.running + + @property + def closed(self) -> bool: + """If an unhandled error occurs, the generator is closed and no further + processing should happen + """ + return self.closing_lock.locked() + + @cached_property + def closing_lock(self) -> asyncio.Lock: + """Flag to indicate whether the generator has been closed""" + return asyncio.Lock() + + @cached_property + def consumes_async(self) -> bool: + """Provided coros iterable is some kind of async provider""" + return isinstance(self._coros, (types.AsyncGeneratorType, AsyncIterator, AsyncIterable)) + + @cached_property + def consumes_generator(self) -> bool: + """Provided coros iterable is some kind of generator""" + return isinstance(self._coros, 
(types.AsyncGeneratorType, types.GeneratorType)) + + @async_property + async def coros(self) -> AsyncIterator[Union[ConcurrentIteratorError, Awaitable]]: + """An async iterator of the provided coroutines""" + coros = self.iter_coros() + try: + async for coro in coros: + yield coro + except GeneratorExit: + # If we exit before we finish generating we land here (ie error was raised) + # In this case we need to tell the (possibly) async generating provider to + # also close. + try: + await coros.aclose() # type:ignore + finally: + # Suppress errors closing the provider generator + # This can raise a further `GeneratorExit` but it will stop providing. + return + + @property + def default_limit(self) -> int: + """Default is to use cpu+4 to a max of 32 coroutines""" + # This reflects the default for asyncio's `ThreadPoolExecutor`, this is a fairly + # arbitrary number to use, but it seems like a reasonable default. + return min(32, (os.cpu_count() or 0) + 4) + + @cached_property + def limit(self) -> int: + """The limit for concurrent coroutines""" + return self._limit or self.default_limit + + @cached_property + def nolimit(self) -> bool: + """Flag indicating no limit to concurrency""" + return self.limit == -1 + + @cached_property + def out(self) -> asyncio.Queue: + """Queue of results to yield back""" + return asyncio.Queue() + + @property + def running(self) -> bool: + """Flag to indicate whether any tasks are running""" + return not self.running_queue.empty() + + @cached_property + def running_queue(self) -> asyncio.Queue: + """Queue which is incremented/decremented as tasks begin/end + + This is for tracking when there are no longer any tasks running. + + A queue is used here as opposed to other synchronization primitives, as + it allows us to get the size and emptiness. + + The queue values are `None`. 
+ """ + return asyncio.Queue() + + @cached_property + def running_tasks(self) -> List[asyncio.Task]: + """Currently running asyncio tasks""" + return self._running + + @cached_property + def sem(self) -> asyncio.Semaphore: + """A sem lock to limit the number of concurrent tasks""" + return asyncio.Semaphore(self.limit) + + @cached_property + def submission_lock(self) -> asyncio.Lock: + """Submission lock to indicate when submission is complete""" + return asyncio.Lock() + + @property + def submitting(self) -> bool: + """Flag to indicate whether we are still submitting coroutines""" + return self.submission_lock.locked() + + async def cancel(self) -> None: + """Stop the submission queue, cancel running tasks, close pending coroutines. + + This is triggered when an unhandled error occurs and the queue should + stop processing and bail. + """ + # Kitchen is closed + await self.close() + + # No more waiting + if not self.nolimit: + self.sem.release() + + # Cancel tasks + await self.cancel_tasks() + + # Close pending coroutines + await self.close_coros() + + # let the submission queue die + await self.submit_task + + async def cancel_tasks(self) -> None: + """Cancel any running tasks""" + + for running in self.running_tasks: + running.cancel() + try: + await running + finally: + # ignore errors, we are dying anyway + continue + + async def close(self) -> None: + """Close the generator, prevent any further processing""" + if not self.closed: + await self.closing_lock.acquire() + + async def close_coros(self) -> None: + """Close provided coroutines (unless the provided coros is a generator)""" + if self.consumes_generator: + # If we have a generator, dont blow/create/wait upon any more items + return + + async for coro in self.iter_coros(): + try: + # this could be an `aio.ConcurrentError` and not have a + # `close` method, but as we are asking for forgiveness anyway, + # no point in looking before we leap. 
+ coro.close() # type:ignore + finally: + # ignore errors, we are dying anyway + continue + + async def create_task(self, coro: Awaitable) -> None: + """Create an asyncio task from the coroutine, and remember it""" + task = asyncio.create_task(self.task(coro)) + self.remember_task(task) + self.running_queue.put_nowait(None) + + async def exit_on_completion(self) -> None: + """Send the exit signal to the output queue""" + if not self.active and not self.closed: + await self.out.put(_sentinel) + + def forget_task(self, task: asyncio.Task) -> None: + """Task? what task?""" + if self.closed: + # If we are closing, don't remove, as this has been triggered + # by cancellation. + return + self.running_tasks.remove(task) + + async def iter_coros(self) -> AsyncIterator[Union[ConcurrentIteratorError, Awaitable]]: + """Iterate provided coros either synchronously or asynchronously, + yielding the awaitables asynchoronously. + """ + try: + if self.consumes_async: + async for coro in self._coros: # type:ignore + yield coro + else: + for coro in self._coros: # type:ignore + yield coro + except BaseException as e: + # Catch all errors iterating (other errors are caught elsewhere) + # If iterating raises, wrap the error and send it to `submit` and + # and `output` to close the queues. + yield ConcurrentIteratorError(e) + + async def on_task_complete(self, result: Any, decrement: Optional[bool] = True) -> None: + """Output the result, release the sem lock, decrement the running + count, and notify output queue if complete. + """ + if self.closed: + # Results can come back after the queue has closed as they are + # cancelled. + # In that case, nothing further to do. 
+ return + + # Give result to output + await self.out.put(result) + + if not self.nolimit: + # Release the sem.lock + self.sem.release() + if decrement: + # Decrement the running_queue if it was incremented + self.running_queue.get_nowait() + # Exit if nothing left to do + await self.exit_on_completion() + + async def output(self) -> AsyncIterator: + """Asynchronously yield results as they become available""" + while True: + # Wait for some output + result = await self.out.get() + if result is _sentinel: + # All done! + await self.close() + break + elif self.should_error(result): + # Raise an error and bail! + await self.cancel() + raise result + yield result + + async def ready(self) -> bool: + """Wait for the sem.lock and indicate availability in the submission + queue + """ + if self.closed: + return False + if not self.nolimit: + await self.sem.acquire() + # We check before and after acquiring the sem.lock to see whether + # we are `closed` as these events can be separated in + # time/procedure. 
+ if self.closed: + return False + return True + + def remember_task(self, task: asyncio.Task) -> None: + """Remember a scheduled asyncio task, in case it needs to be + cancelled + """ + self.running_tasks.append(task) + task.add_done_callback(self.forget_task) + + def should_error(self, result: Any) -> bool: + """Check a result type and whether it should raise an error""" + return ( + isinstance(result, ConcurrentIteratorError) + or (isinstance(result, ConcurrentError) and not self.yield_exceptions)) + + async def submit(self) -> None: + """Process the iterator of coroutines as a submission queue""" + await self.submission_lock.acquire() + async for coro in self.coros: + if isinstance(coro, ConcurrentIteratorError): + # Iteration error, exit now + await self.out.put(coro) + break + if not await self.ready(): + # Queue is closing, get out of here + try: + # Ensure the last coro to be produced/generated is closed, + # as it will not be scheduled as a task, and in the case + # of generators it wont be closed any other way. 
+ coro.close() + finally: + # ignore all coro closing errors, we are dying + break + # Check the supplied coro is awaitable + try: + self.validate_coro(coro) + except ConcurrentError as e: + await self.on_task_complete(e, decrement=False) + continue + # All good, create a task + await self.create_task(coro) + self.submission_lock.release() + # If cleanup of the submission queue has taken longer than processing + # we need to manually close + await self.exit_on_completion() + + async def task(self, coro: Awaitable) -> None: + """Task wrapper to catch/wrap errors and output awaited results""" + try: + result = await coro + except BaseException as e: + result = ConcurrentExecutionError(e) + finally: + await self.on_task_complete(result) + + def validate_coro(self, coro: Awaitable) -> None: + """Validate that a provided coroutine is actually awaitable""" + if not inspect.isawaitable(coro): + raise ConcurrentError(f"Provided input was not a coroutine: {coro}") + + if inspect.getcoroutinestate(coro) != inspect.CORO_CREATED: + raise ConcurrentError(f"Provided coroutine has already been fired: {coro}") diff --git a/tools/base/base_command.py b/tools/base/base_command.py new file mode 100644 index 0000000000000..41cd5675da16f --- /dev/null +++ b/tools/base/base_command.py @@ -0,0 +1,13 @@ +#!/usr/bin/env python3 + +import sys + +from __UPSTREAM_PACKAGE__ import main as upstream_main + + +def main(*args: str) -> int: + return upstream_main(*args) + + +if __name__ == "__main__": + sys.exit(main(*sys.argv[1:])) diff --git a/tools/base/checker.py b/tools/base/checker.py index 8a73fef52dfe2..4feed282d2ec1 100644 --- a/tools/base/checker.py +++ b/tools/base/checker.py @@ -1,20 +1,20 @@ import argparse import asyncio import logging -import os +import pathlib from functools import cached_property -from typing import Optional, Sequence, Tuple, Type +from typing import Any, Iterable, Optional, Sequence, Tuple, Type from tools.base import runner -class Checker(runner.Runner): +class 
BaseChecker(runner.Runner): """Runs check methods prefixed with `check_` and named in `self.checks` Check methods should call the `self.warn`, `self.error` or `self.succeed` depending upon the outcome of the checks. """ - _active_check: Optional[str] = None + _active_check = "" checks: Tuple[str, ...] = () def __init__(self, *args): @@ -24,7 +24,7 @@ def __init__(self, *args): self.warnings = {} @property - def active_check(self) -> Optional[str]: + def active_check(self) -> str: return self._active_check @property @@ -58,14 +58,14 @@ def has_failed(self) -> bool: return bool(self.failed or self.warned) @cached_property - def path(self) -> str: + def path(self) -> pathlib.Path: """The "path" - usually Envoy src dir. This is used for finding configs for the tooling and should be a dir""" try: - path = self.args.path or self.args.paths[0] + path = pathlib.Path(self.args.path or self.args.paths[0]) except IndexError: raise self.parser.error( "Missing path: `path` must be set either as an arg or with --path") - if not os.path.isdir(path): + if not path.is_dir(): raise self.parser.error( "Incorrect path: `path` must be a directory, set either as first arg or with --path" ) @@ -174,7 +174,12 @@ def add_arguments(self, parser: argparse.ArgumentParser) -> None: "Paths to check. 
At least one path must be specified, or the `path` argument should be provided" ) - def error(self, name: str, errors: list, log: bool = True, log_type: str = "error") -> int: + def error( + self, + name: str, + errors: Optional[Iterable[str]], + log: bool = True, + log_type: str = "error") -> int: """Record (and log) errors for a check type""" if not errors: return 0 @@ -197,13 +202,13 @@ def get_checks(self) -> Sequence[str]: self.checks if not self.args.check else [check for check in self.args.check if check in self.checks]) - def on_check_begin(self, check: str) -> None: + def on_check_begin(self, check: str) -> Any: self._active_check = check self.log.notice(f"[{check}] Running check") - def on_check_run(self, check: str) -> None: + def on_check_run(self, check: str) -> Any: """Callback hook called after each check run""" - self._active_check = None + self._active_check = "" if self.exiting: return elif check in self.errors: @@ -213,16 +218,17 @@ def on_check_run(self, check: str) -> None: else: self.log.success(f"[{check}] Check completed successfully") - def on_checks_begin(self) -> None: + def on_checks_begin(self) -> Any: """Callback hook called before all checks""" pass - def on_checks_complete(self) -> int: + def on_checks_complete(self) -> Any: """Callback hook called after all checks have run, and returning the final outcome of a checks_run""" if self.show_summary: self.summary.print_summary() return 1 if self.has_failed else 0 + @runner.cleansup def run(self) -> int: """Run all configured checks and return the sum of their error counts""" checks = self.get_checks() @@ -257,8 +263,19 @@ def warn(self, name: str, warnings: list, log: bool = True) -> None: self.log.warning(f"[{name}] {message}") -class ForkingChecker(runner.ForkingRunner, Checker): - pass +class Checker(BaseChecker): + + def on_check_begin(self, check: str) -> None: + super().on_check_begin(check) + + def on_check_run(self, check: str) -> None: + super().on_check_run(check) + + def 
on_checks_begin(self) -> None: + super().on_checks_complete() + + def on_checks_complete(self) -> int: + return super().on_checks_complete() class BazelChecker(runner.BazelRunner, Checker): @@ -267,7 +284,7 @@ class BazelChecker(runner.BazelRunner, Checker): class CheckerSummary(object): - def __init__(self, checker: Checker): + def __init__(self, checker: BaseChecker): self.checker = checker @property @@ -319,7 +336,7 @@ def _section(self, message: str, lines: list = None) -> list: return section -class AsyncChecker(Checker): +class AsyncChecker(BaseChecker): """Async version of the Checker class for use with asyncio""" async def _run(self) -> int: @@ -337,6 +354,7 @@ async def _run(self) -> int: result = await self.on_checks_complete() return result + @runner.cleansup def run(self) -> int: try: return asyncio.get_event_loop().run_until_complete(self._run()) diff --git a/tools/base/envoy_python.bzl b/tools/base/envoy_python.bzl index 1a4ecf0d91c75..3af2b50323834 100644 --- a/tools/base/envoy_python.bzl +++ b/tools/base/envoy_python.bzl @@ -1,27 +1,30 @@ load("@rules_python//python:defs.bzl", "py_binary", "py_library") -def envoy_py_test(name, package, visibility): +def envoy_py_test(name, package, visibility, envoy_prefix = "@envoy"): + filepath = "$(location %s//tools/testing:base_pytest_runner.py)" % envoy_prefix + output = "$(@D)/pytest_%s.py" % name + native.genrule( name = "generate_pytest_" + name, - cmd = "sed s/_PACKAGE_NAME_/" + package + "/ $(location //tools/testing:base_pytest_runner.py) > \"$(@D)/pytest_" + name + ".py\"", - tools = ["//tools/testing:base_pytest_runner.py"], - outs = ["pytest_" + name + ".py"], + cmd = "sed s/_PACKAGE_NAME_/%s/ %s > \"%s\"" % (package, filepath, output), + tools = ["%s//tools/testing:base_pytest_runner.py" % envoy_prefix], + outs = ["pytest_%s.py" % name], ) test_deps = [ - ":" + name, + ":%s" % name, ] if name != "python_pytest": - test_deps.append("//tools/testing:python_pytest") + 
test_deps.append("%s//tools/testing:python_pytest" % envoy_prefix) py_binary( - name = "pytest_" + name, + name = "pytest_%s" % name, srcs = [ - "pytest_" + name + ".py", - "tests/test_" + name + ".py", + "pytest_%s.py" % name, + "tests/test_%s.py" % name, ], - data = [":generate_pytest_" + name], + data = [":generate_pytest_%s" % name], deps = test_deps, visibility = visibility, ) @@ -30,36 +33,96 @@ def envoy_py_library( name = None, deps = [], data = [], - visibility = ["//visibility:public"]): + visibility = ["//visibility:public"], + envoy_prefix = "", + test = True): _parts = name.split(".") package = ".".join(_parts[:-1]) name = _parts[-1] py_library( name = name, - srcs = [name + ".py"], + srcs = ["%s.py" % name], deps = deps, data = data, visibility = visibility, ) - - envoy_py_test(name, package, visibility) + if test: + envoy_py_test(name, package, visibility, envoy_prefix = envoy_prefix) def envoy_py_binary( name = None, deps = [], data = [], - visibility = ["//visibility:public"]): + visibility = ["//visibility:public"], + envoy_prefix = "@envoy", + test = True): _parts = name.split(".") package = ".".join(_parts[:-1]) name = _parts[-1] py_binary( name = name, - srcs = [name + ".py"], + srcs = ["%s.py" % name], deps = deps, data = data, visibility = visibility, ) - envoy_py_test(name, package, visibility) + if test: + envoy_py_test(name, package, visibility, envoy_prefix = envoy_prefix) + +def envoy_py_script( + name, + entry_point, + deps = [], + data = [], + visibility = ["//visibility:public"], + envoy_prefix = "@envoy"): + """This generates a `py_binary` from an entry_point in a python package + + Currently, the actual entrypoint callable is hard-coded to `main`. 
+ + For example, if you wish to make use of a `console_script` in an upstream + package that resolves as `envoy.code_format.python.command.main` from a + package named `envoy.code_format.python`, you can use this macro as + follows: + + ```skylark + + envoy_py_script( + name = "tools.code_format.python", + entry_point = "envoy.code_format.python.command", + deps = [requirement("envoy.code_format.python")], + ``` + + You will then be able to use the console script from bazel. + + Separate args to be passed to the console_script with `--`, eg: + + ```console + + $ bazel run //tools/code_format:python -- -h + ``` + + """ + py_file = "%s.py" % name.split(".")[-1] + output = "$(@D)/%s" % py_file + template_rule = "%s//tools/base:base_command.py" % envoy_prefix + template = "$(location %s)" % template_rule + + native.genrule( + name = "py_script_%s" % py_file, + cmd = "sed s/__UPSTREAM_PACKAGE__/%s/ %s > \"%s\"" % (entry_point, template, output), + tools = [template_rule], + outs = [py_file], + ) + + envoy_py_binary( + name = name, + deps = deps, + data = data, + visibility = visibility, + envoy_prefix = envoy_prefix, + test = False, + ) diff --git a/tools/base/requirements.txt b/tools/base/requirements.txt index f7f9ecb473a03..658174e0bc9ff 100644 --- a/tools/base/requirements.txt +++ b/tools/base/requirements.txt @@ -2,8 +2,14 @@ # This file is autogenerated by pip-compile # To update, run: # -# pip-compile --generate-hashes tools/base/requirements.txt +# pip-compile --allow-unsafe --generate-hashes tools/base/requirements.txt # +abstracts==0.0.12 \ + --hash=sha256:acc01ff56c8a05fb88150dff62e295f9071fc33388c42f1dfc2787a8d1c755ff + # via aio.functional +aio.functional==0.0.9 \ + --hash=sha256:824a997a394ad891bc9f403426babc13c9d0d1f4d1708c38e77d6aecae1cab1d + # via -r tools/base/requirements.txt colorama==0.4.4 \ --hash=sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b \ --hash=sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2 
@@ -12,9 +18,9 @@ coloredlogs==15.0.1 \ --hash=sha256:612ee75c546f53e92e70049c9dbfcc18c935a2b9a53b66085ce9ef6a6e5c0934 \ --hash=sha256:7c991aa71a4577af2f82600d8f8f3a89f936baeaf9b50a9c197da014e5bf16b0 # via -r tools/base/requirements.txt -frozendict==2.0.3 \ - --hash=sha256:163c616188beb97fdc8ef6e73ec2ebd70a844d4cf19d2e383aa94d1b8376653d \ - --hash=sha256:58143e2d3d11699bc295d9e7e05f10dde99a727e2295d7f43542ecdc42c5ec70 +frozendict==2.0.6 \ + --hash=sha256:3f00de72805cf4c9e81b334f3f04809278b967d2fed84552313a0fcce511beb1 \ + --hash=sha256:5d3f75832c35d4df041f0e19c268964cbef29c1eb34cd3517cf883f1c2d089b9 # via -r tools/base/requirements.txt humanfriendly==9.2 \ --hash=sha256:332da98c24cc150efcc91b5508b19115209272bfdf4b0764a56795932f854271 \ @@ -52,8 +58,14 @@ pyyaml==5.4.1 \ --hash=sha256:fd7f6999a8070df521b6384004ef42833b9bd62cfee11a09bda1079b4b704247 \ --hash=sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6 \ --hash=sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0 - # via -r tools/distribution/requirements.txt + # via -r tools/base/requirements.txt verboselogs==1.7 \ --hash=sha256:d63f23bf568295b95d3530c6864a0b580cec70e7ff974177dead1e4ffbc6ff49 \ --hash=sha256:e33ddedcdfdafcb3a174701150430b11b46ceb64c2a9a26198c76a156568e427 # via -r tools/base/requirements.txt + +# The following packages are considered to be unsafe in a requirements file: +setuptools==57.4.0 \ + --hash=sha256:6bac238ffdf24e8806c61440e755192470352850f3419a52f26ffe0a1a64f465 \ + --hash=sha256:a49230977aa6cfb9d933614d2f7b79036e9945c4cdd7583163f4e920b83418d6 + # via -r tools/base/requirements.txt diff --git a/tools/base/runner.py b/tools/base/runner.py index 5f4d18af6cf6a..87d5577b370a0 100644 --- a/tools/base/runner.py +++ b/tools/base/runner.py @@ -3,24 +3,26 @@ # import argparse +import inspect import logging -import os +import pathlib import subprocess import sys +import tempfile from functools import cached_property, wraps -from typing import Callable, 
Tuple, Optional, Union +from typing import Callable, Optional, Tuple, Type, Union from frozendict import frozendict -import coloredlogs -import verboselogs +import coloredlogs # type:ignore +import verboselogs # type:ignore LOG_LEVELS = (("debug", logging.DEBUG), ("info", logging.INFO), ("warn", logging.WARN), ("error", logging.ERROR)) -LOG_FIELD_STYLES = frozendict( +LOG_FIELD_STYLES: frozendict = frozendict( name=frozendict(color="blue"), levelname=frozendict(color="cyan", bold=True)) LOG_FMT = "%(name)s %(levelname)s %(message)s" -LOG_LEVEL_STYLES = frozendict( +LOG_LEVEL_STYLES: frozendict = frozendict( critical=frozendict(bold=True, color="red"), debug=frozendict(color="green"), error=frozendict(color="red", bold=True), @@ -32,7 +34,7 @@ warning=frozendict(color="yellow", bold=True)) -def catches(errors: Union[Tuple[Exception], Exception]) -> Callable: +def catches(errors: Union[Type[Exception], Tuple[Type[Exception], ...]]) -> Callable: """Method decorator to catch specified errors logs and returns 1 for sys.exit if error/s are caught @@ -48,6 +50,7 @@ def run(self): self.myrun() ``` + Can work with `async` methods too. """ def wrapper(fun: Callable) -> Callable: @@ -60,11 +63,55 @@ def wrapped(self, *args, **kwargs) -> Optional[int]: self.log.error(str(e) or repr(e)) return 1 - return wrapped + @wraps(fun) + async def async_wrapped(self, *args, **kwargs) -> Optional[int]: + try: + return await fun(self, *args, **kwargs) + except errors as e: + self.log.error(str(e) or repr(e)) + return 1 + + wrapped_fun = async_wrapped if inspect.iscoroutinefunction(fun) else wrapped + + # mypy doesnt trust `@wraps` to give back a `__wrapped__` object so we + # need to code defensively here + wrapping = getattr(wrapped_fun, "__wrapped__", None) + if wrapping: + setattr(wrapping, "__catches__", errors) + return wrapped_fun return wrapper +def cleansup(fun) -> Callable: + """Method decorator to call `.cleanup()` after run. + + Can work with `sync` and `async` methods. 
+ """ + + @wraps(fun) + def wrapped(self, *args, **kwargs) -> Optional[int]: + try: + return fun(self, *args, **kwargs) + finally: + self.cleanup() + + @wraps(fun) + async def async_wrapped(self, *args, **kwargs) -> Optional[int]: + try: + return await fun(self, *args, **kwargs) + finally: + await self.cleanup() + + # mypy doesnt trust `@wraps` to give back a `__wrapped__` object so we + # need to code defensively here + wrapped_fun = async_wrapped if inspect.iscoroutinefunction(fun) else wrapped + wrapping = getattr(wrapped_fun, "__wrapped__", None) + if wrapping: + setattr(wrapping, "__cleansup__", True) + return wrapped_fun + + class BazelRunError(Exception): pass @@ -75,7 +122,7 @@ def filter(self, rec): return rec.levelno in (logging.DEBUG, logging.INFO) -class Runner(object): +class BaseRunner: def __init__(self, *args): self._args = args @@ -103,7 +150,7 @@ def log_level_styles(self): return LOG_LEVEL_STYLES @cached_property - def log(self) -> logging.Logger: + def log(self) -> verboselogs.VerboseLogger: """Instantiated logger""" verboselogs.install() logger = logging.getLogger(self.name) @@ -135,8 +182,8 @@ def parser(self) -> argparse.ArgumentParser: return parser @cached_property - def path(self) -> str: - return os.getcwd() + def path(self) -> pathlib.Path: + return pathlib.Path(".") @cached_property def stdout(self) -> logging.Logger: @@ -148,6 +195,19 @@ def stdout(self) -> logging.Logger: logger.addHandler(handler) return logger + @cached_property + def tempdir(self) -> tempfile.TemporaryDirectory: + """If you call this property, remember to call `.cleanup` + + For `run` methods this should be done by decorating the method with + `@runner.cleansup` + """ + if self._missing_cleanup: + self.log.warning( + "Tempdir created but instance has a `run` method which is not decorated with `@runner.cleansup`" + ) + return tempfile.TemporaryDirectory() + def add_arguments(self, parser: argparse.ArgumentParser) -> None: """Override this method to add custom 
arguments to the arg parser""" parser.add_argument( @@ -157,8 +217,32 @@ def add_arguments(self, parser: argparse.ArgumentParser) -> None: default="info", help="Log level to display") + @property + def _missing_cleanup(self) -> bool: + run_fun = getattr(self, "run", None) + return bool( + run_fun + and not getattr(getattr(run_fun, "__wrapped__", object()), "__cleansup__", False)) + + def _cleanup_tempdir(self) -> None: + if "tempdir" in self.__dict__: + self.tempdir.cleanup() + del self.__dict__["tempdir"] + + +class Runner(BaseRunner): + + def cleanup(self) -> None: + self._cleanup_tempdir() + + +class AsyncRunner(BaseRunner): + + async def cleanup(self) -> None: + self._cleanup_tempdir() + -class ForkingAdapter(object): +class ForkingAdapter: def __init__(self, context: Runner): self.context = context @@ -173,7 +257,7 @@ def subproc_run( return subprocess.run(*args, capture_output=capture_output, **kwargs) -class BazelAdapter(object): +class BazelAdapter: def __init__(self, context: "ForkingRunner"): self.context = context diff --git a/tools/base/tests/test_aio.py b/tools/base/tests/test_aio.py new file mode 100644 index 0000000000000..e80747f5d4a8a --- /dev/null +++ b/tools/base/tests/test_aio.py @@ -0,0 +1,1289 @@ + +import asyncio +import gc +import inspect +import types +from typing import AsyncIterator, AsyncIterable +from unittest.mock import AsyncMock, MagicMock, PropertyMock + +import pytest + +from tools.base import aio + + +@pytest.mark.asyncio +async def test_async_subprocess_parallel(patches): + patched = patches( + "asyncio", + "ProcessPoolExecutor", + "async_subprocess.run", + prefix="tools.base.aio") + procs = [f"PROC{i}" for i in range(0, 3)] + kwargs = {f"KEY{i}": f"VALUE{i}" for i in range(0, 3)} + + async def async_result(result): + return result + + with patched as (m_asyncio, m_future, m_run): + returned = [f"RESULT{i}" for i in range(0, 5)] + m_asyncio.as_completed.return_value = [ + async_result(result) for result in returned] + + results = 
[] + async for result in aio.async_subprocess.parallel(procs, **kwargs): + results.append(result) + + assert results == returned + assert ( + list(m_future.call_args) + == [(), {}]) + assert ( + list(m_asyncio.as_completed.call_args) + == [(tuple(m_asyncio.ensure_future.return_value for i in range(0, len(procs))), ), {}]) + kwargs["executor"] = m_future.return_value.__enter__.return_value + assert ( + list(list(c) for c in m_run.call_args_list) + == [[(proc,), kwargs] for proc in procs]) + assert ( + list(list(c) for c in m_asyncio.ensure_future.call_args_list) + == [[(m_run.return_value,), {}] for proc in procs]) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("loop", [True, False]) +@pytest.mark.parametrize("executor", [None, "EXECUTOR"]) +async def test_async_subprocess_run(patches, loop, executor): + patched = patches( + "asyncio", + "partial", + "subprocess", + prefix="tools.base.aio") + args = [f"ARG{i}" for i in range(0, 3)] + kwargs = {f"KEY{i}": f"VALUE{i}" for i in range(0, 3)} + + if loop: + kwargs["loop"] = AsyncMock() + + if executor: + kwargs["executor"] = executor + + with patched as (m_asyncio, m_partial, m_subproc): + m_asyncio.get_running_loop.return_value = AsyncMock() + if loop: + m_loop = kwargs["loop"] + else: + m_loop = m_asyncio.get_running_loop.return_value + + assert ( + await aio.async_subprocess.run(*args, **kwargs) + == m_loop.run_in_executor.return_value) + + if loop: + assert not m_asyncio.get_running_loop.called + + kwargs.pop("executor", None) + kwargs.pop("loop", None) + + assert ( + list(m_partial.call_args) + == [(m_subproc.run, ) + tuple(args), kwargs]) + assert ( + list(m_loop.run_in_executor.call_args) + == [(executor, m_partial.return_value), {}]) + + +@pytest.mark.parametrize("limit", ["XX", None, "", 0, -1, 73]) +@pytest.mark.parametrize("yield_exceptions", [None, True, False]) +def test_aio_concurrent_constructor(limit, yield_exceptions): + kwargs = {} + if limit == "XX": + limit = None + else: + kwargs["limit"] = 
limit + if yield_exceptions is not None: + kwargs["yield_exceptions"] = yield_exceptions + + concurrent = aio.concurrent(["CORO"], **kwargs) + assert concurrent._coros == ["CORO"] + assert concurrent._limit == limit + assert ( + concurrent.yield_exceptions + == (False + if yield_exceptions is None + else yield_exceptions)) + assert concurrent._running == [] + + assert concurrent.running_tasks is concurrent._running + assert "running_tasks" in concurrent.__dict__ + + +def test_aio_concurrent_dunder_aiter(patches): + concurrent = aio.concurrent(["CORO"]) + patched = patches( + "asyncio", + "concurrent.output", + ("concurrent.submit", dict(new_callable=MagicMock)), + prefix="tools.base.aio") + + with patched as (m_asyncio, m_output, m_submit): + assert concurrent.__aiter__() == m_output.return_value + + assert concurrent.submit_task == m_asyncio.create_task.return_value + assert ( + list(m_submit.call_args) + == [(), {}]) + assert ( + list(m_asyncio.create_task.call_args) + == [(m_submit.return_value, ), {}]) + + +@pytest.mark.parametrize("running", [True, False]) +@pytest.mark.parametrize("submitting", [True, False]) +def test_aio_concurrent_active(patches, running, submitting): + concurrent = aio.concurrent(["CORO"]) + patched = patches( + "asyncio", + ("concurrent.submitting", dict(new_callable=PropertyMock)), + ("concurrent.running", dict(new_callable=PropertyMock)), + prefix="tools.base.aio") + + with patched as (m_asyncio, m_submit, m_run): + m_submit.return_value = submitting + m_run.return_value = running + assert concurrent.active == (submitting or running) + + assert "active" not in concurrent.__dict__ + + +def test_aio_concurrent_closing_lock(patches): + concurrent = aio.concurrent(["CORO"]) + patched = patches( + "asyncio", + prefix="tools.base.aio") + + with patched as (m_asyncio, ): + assert concurrent.closing_lock == m_asyncio.Lock.return_value + + assert ( + list(m_asyncio.Lock.call_args) + == [(), {}]) + assert "closing_lock" in concurrent.__dict__ + 
+ + +@pytest.mark.parametrize("locked", [True, False]) +def test_aio_concurrent_closed(patches, locked): + concurrent = aio.concurrent(["CORO"]) + patched = patches( + ("concurrent.closing_lock", dict(new_callable=PropertyMock)), + prefix="tools.base.aio") + + with patched as (m_closing_lock, ): + m_closing_lock.return_value.locked.return_value = locked + assert concurrent.closed == locked + + assert "closed" not in concurrent.__dict__ + + +@pytest.mark.asyncio +@pytest.mark.parametrize("raises", [None, BaseException, GeneratorExit]) +@pytest.mark.parametrize("close_raises", [None, BaseException]) +async def test_aio_concurrent_coros(patches, raises, close_raises): + concurrent = aio.concurrent(["CORO"]) + patched = patches( + ("concurrent.iter_coros", dict(new_callable=PropertyMock)), + prefix="tools.base.aio") + results = [] + return_coros = [f"CORO{i}" for i in range(0, 3)] + m_aclose = AsyncMock() + if close_raises: + m_aclose.side_effect = close_raises() + + class Coros: + aclose = m_aclose + + def __call__(self): + return self + + async def __aiter__(self): + if raises: + raise raises("AN ERROR OCCURRED") + for coro in return_coros: + yield coro + + with patched as (m_coros, ): + coros = Coros() + m_coros.return_value = coros + if raises == BaseException: + with pytest.raises(BaseException): + async for coro in concurrent.coros: + pass + else: + async for coro in concurrent.coros: + results.append(coro) + + if raises == GeneratorExit: + assert ( + list(coros.aclose.call_args) + == [(), {}]) + return + + assert not coros.aclose.called + assert "coros" not in concurrent.__dict__ + + if raises: + return + assert results == return_coros + + +def test_aio_concurrent_running_queue(patches): + concurrent = aio.concurrent(["CORO"]) + patched = patches( + "asyncio", + prefix="tools.base.aio") + + with patched as (m_asyncio, ): + assert concurrent.running_queue == m_asyncio.Queue.return_value + + assert ( + list(m_asyncio.Queue.call_args) + == [(), {}]) + assert 
"running_queue" in concurrent.__dict__ + + +@pytest.mark.parametrize("cpus", [None, "", 0, 4, 73]) +def test_aio_concurrent_default_limit(patches, cpus): + concurrent = aio.concurrent(["CORO"]) + patched = patches( + "min", + "os", + prefix="tools.base.aio") + + with patched as (m_min, m_os): + m_os.cpu_count.return_value = cpus + assert concurrent.default_limit == m_min.return_value + + assert ( + list(m_min.call_args) + == [(32, (cpus or 0) + 4), {}]) + assert "default_limit" not in concurrent.__dict__ + + +def test_aio_concurrent_consumes_async(patches): + concurrent = aio.concurrent(["CORO"]) + patched = patches( + "isinstance", + prefix="tools.base.aio") + + with patched as (m_inst, ): + assert concurrent.consumes_async == m_inst.return_value + + assert ( + list(m_inst.call_args) + == [(["CORO"], (types.AsyncGeneratorType, AsyncIterator, AsyncIterable)), {}]) + assert "consumes_async" in concurrent.__dict__ + + +def test_aio_concurrent_consumes_generator(patches): + concurrent = aio.concurrent(["CORO"]) + patched = patches( + "isinstance", + prefix="tools.base.aio") + + with patched as (m_inst, ): + assert concurrent.consumes_generator == m_inst.return_value + + assert ( + list(m_inst.call_args) + == [(["CORO"], (types.AsyncGeneratorType, types.GeneratorType)), {}]) + assert "consumes_generator" in concurrent.__dict__ + + +@pytest.mark.parametrize("limit", [None, "", 0, -1, 73]) +def test_aio_concurrent_limit(patches, limit): + concurrent = aio.concurrent(["CORO"]) + patched = patches( + ("concurrent.default_limit", dict(new_callable=PropertyMock)), + prefix="tools.base.aio") + concurrent._limit = limit + + with patched as (m_limit, ): + assert concurrent.limit == (limit or m_limit.return_value) + + if limit: + assert not m_limit.called + + assert "limit" in concurrent.__dict__ + + +@pytest.mark.parametrize("limit", [None, "", 0, -1, 73]) +def test_aio_concurrent_nolimit(patches, limit): + concurrent = aio.concurrent(["CORO"]) + patched = patches( + 
("concurrent.limit", dict(new_callable=PropertyMock)), + prefix="tools.base.aio") + + with patched as (m_limit, ): + m_limit.return_value = limit + assert concurrent.nolimit == (limit == -1) + + assert "nolimit" in concurrent.__dict__ + + +def test_aio_concurrent_out(patches): + concurrent = aio.concurrent(["CORO"]) + patched = patches( + "asyncio", + prefix="tools.base.aio") + + with patched as (m_asyncio, ): + assert concurrent.out == m_asyncio.Queue.return_value + + assert ( + list(m_asyncio.Queue.call_args) + == [(), {}]) + assert "out" in concurrent.__dict__ + + +@pytest.mark.parametrize("empty", [True, False]) +def test_aio_concurrent_running(patches, empty): + concurrent = aio.concurrent(["CORO"]) + patched = patches( + ("concurrent.running_queue", dict(new_callable=PropertyMock)), + prefix="tools.base.aio") + + with patched as (m_running_queue, ): + m_running_queue.return_value.empty.return_value = empty + assert concurrent.running == (not empty) + + assert "running" not in concurrent.__dict__ + + +def test_aio_concurrent_sem(patches): + concurrent = aio.concurrent(["CORO"]) + patched = patches( + "asyncio", + ("concurrent.limit", dict(new_callable=PropertyMock)), + prefix="tools.base.aio") + + with patched as (m_asyncio, m_limit): + assert concurrent.sem == m_asyncio.Semaphore.return_value + + assert ( + list(m_asyncio.Semaphore.call_args) + == [(m_limit.return_value, ), {}]) + assert "sem" in concurrent.__dict__ + + +def test_aio_concurrent_submission_lock(patches): + concurrent = aio.concurrent(["CORO"]) + patched = patches( + "asyncio", + prefix="tools.base.aio") + + with patched as (m_asyncio, ): + assert concurrent.submission_lock == m_asyncio.Lock.return_value + + assert ( + list(m_asyncio.Lock.call_args) + == [(), {}]) + assert "submission_lock" in concurrent.__dict__ + + +@pytest.mark.parametrize("locked", [True, False]) +def test_aio_concurrent_submitting(patches, locked): + concurrent = aio.concurrent(["CORO"]) + patched = patches( + 
("concurrent.submission_lock", dict(new_callable=PropertyMock)), + prefix="tools.base.aio") + + with patched as (m_submission_lock, ): + m_submission_lock.return_value.locked.return_value = locked + assert concurrent.submitting == locked + + assert "submitting" not in concurrent.__dict__ + + +@pytest.mark.asyncio +async def test_aio_concurrent_cancel(patches): + concurrent = aio.concurrent(["CORO"]) + patched = patches( + ("concurrent.cancel_tasks", dict(new_callable=AsyncMock)), + ("concurrent.close", dict(new_callable=AsyncMock)), + ("concurrent.close_coros", dict(new_callable=AsyncMock)), + ("concurrent.sem", dict(new_callable=PropertyMock)), + prefix="tools.base.aio") + + waiter = MagicMock() + + class SubmitTask: + def __init__(self): + self.cancel = MagicMock() + + def __await__(self): + waiter() + yield + + concurrent.submit_task = SubmitTask() + + with patched as (m_cancel, m_close, m_coros, m_sem): + assert not await concurrent.cancel() + + assert ( + list(m_close.call_args) + == [(), {}]) + assert ( + list(m_sem.return_value.release.call_args) + == [(), {}]) + assert ( + list(m_cancel.call_args) + == [(), {}]) + assert ( + list(m_coros.call_args) + == [(), {}]) + assert ( + list(waiter.call_args) + == [(), {}]) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("bad", range(0, 8)) +async def test_aio_concurrent_cancel_tasks(patches, bad): + concurrent = aio.concurrent(["CORO"]) + patched = patches( + ("concurrent.running_tasks", dict(new_callable=PropertyMock)), + prefix="tools.base.aio") + + tasks = [] + waiter = MagicMock() + + class Task: + def __init__(self, i): + self.i = i + self.cancel = MagicMock() + + def __await__(self): + waiter() + if self.i == bad: + raise BaseException("AN ERROR OCCURRED") + + for i in range(0, 7): + tasks.append(Task(i)) + + with patched as (m_running, ): + m_running.return_value = tasks + assert not await concurrent.cancel_tasks() + + assert ( + list(list(c) for c in waiter.call_args_list) + == [[(), {}]] * 7) + for task 
in tasks: + assert ( + list(task.cancel.call_args) + == [(), {}]) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("closed", [True, False]) +async def test_aio_concurrent_close(patches, closed): + concurrent = aio.concurrent(["CORO"]) + patched = patches( + ("concurrent.closed", dict(new_callable=PropertyMock)), + ("concurrent.closing_lock", dict(new_callable=PropertyMock)), + prefix="tools.base.aio") + + with patched as (m_closed, m_lock): + m_closed.return_value = closed + m_lock.return_value.acquire = AsyncMock() + assert not await concurrent.close() + + if closed: + assert not m_lock.called + else: + assert ( + list(m_lock.return_value.acquire.call_args) + == [(), {}]) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("consumes_generator", [True, False]) +@pytest.mark.parametrize("bad", range(0, 8)) +async def test_aio_concurrent_close_coros(patches, consumes_generator, bad): + concurrent = aio.concurrent(["CORO"]) + patched = patches( + "concurrent.close", + ("concurrent.iter_coros", dict(new_callable=PropertyMock)), + ("concurrent.consumes_generator", dict(new_callable=PropertyMock)), + prefix="tools.base.aio") + + coros = [] + for i in range(0, 7): + coro = MagicMock() + if i == bad: + coro.close.side_effect = BaseException("AN ERROR OCCURRED") + coros.append(coro) + + async def iter_coros(): + for coro in coros: + yield coro + + with patched as (m_close, m_iter, m_isgen): + m_isgen.return_value = consumes_generator + m_iter.return_value = iter_coros + assert not await concurrent.close_coros() + + if consumes_generator: + assert not m_iter.called + return + assert ( + list(m_iter.call_args) + == [(), {}]) + for coro in coros: + assert ( + list(coro.close.call_args) + == [(), {}]) + + +@pytest.mark.asyncio +async def test_aio_concurrent_create_task(patches): + concurrent = aio.concurrent(["CORO"]) + patched = patches( + "asyncio", + "concurrent.remember_task", + ("concurrent.task", dict(new_callable=MagicMock)), + ("concurrent.running_queue", 
dict(new_callable=PropertyMock)), + prefix="tools.base.aio") + + with patched as (m_asyncio, m_rem, m_task, m_running_queue): + assert not await concurrent.create_task("CORO") + + assert ( + list(m_running_queue.return_value.put_nowait.call_args) + == [(None, ), {}]) + assert ( + list(m_task.call_args) + == [("CORO", ), {}]) + assert ( + list(m_asyncio.create_task.call_args) + == [(m_task.return_value, ), {}]) + assert ( + list(m_rem.call_args) + == [(m_asyncio.create_task.return_value, ), {}]) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("closed", [True, False]) +@pytest.mark.parametrize("active", [True, False]) +async def test_aio_concurrent_exit_on_completion(patches, active, closed): + concurrent = aio.concurrent(["CORO"]) + patched = patches( + ("concurrent.active", dict(new_callable=PropertyMock)), + ("concurrent.closed", dict(new_callable=PropertyMock)), + ("concurrent.out", dict(new_callable=PropertyMock)), + prefix="tools.base.aio") + + with patched as (m_active, m_closed, m_out): + m_out.return_value.put = AsyncMock() + m_active.return_value = active + m_closed.return_value = closed + assert not await concurrent.exit_on_completion() + + if closed or active: + assert not m_out.called + return + assert ( + list(m_out.return_value.put.call_args) + == [(aio._sentinel, ), {}]) + + +@pytest.mark.parametrize("closed", [True, False]) +def test_aio_concurrent_forget_task(patches, closed): + concurrent = aio.concurrent(["CORO"]) + patched = patches( + ("concurrent.closed", dict(new_callable=PropertyMock)), + prefix="tools.base.aio") + concurrent._running = MagicMock() + + with patched as (m_closed, ): + m_closed.return_value = closed + assert not concurrent.forget_task("TASK") + + if closed: + assert not concurrent._running.remove.called + return + assert ( + list(concurrent._running.remove.call_args) + == [("TASK", ), {}]) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("raises", [True, False]) +@pytest.mark.parametrize("consumes_async", [True, False]) 
+async def test_aio_concurrent_iter_coros(patches, raises, consumes_async): + concurrent = aio.concurrent(["CORO"]) + patched = patches( + ("concurrent.consumes_async", dict(new_callable=PropertyMock)), + prefix="tools.base.aio") + + coros = [f"CORO{i}" for i in range(0, 7)] + exception = BaseException("AN RAISES OCCURRED") + + def iter_coros(): + if raises: + raise exception + for coro in coros: + yield coro + + async def async_iter_coros(): + if raises: + raise exception + for coro in coros: + yield coro + + concurrent._coros = ( + async_iter_coros() + if consumes_async + else iter_coros()) + results = [] + + with patched as (m_async, ): + m_async.return_value = consumes_async + + async for result in concurrent.iter_coros(): + results.append(result) + + if raises: + error = results[0] + assert isinstance(error, aio.ConcurrentIteratorError) + assert error.args[0] is exception + assert results == [error] + return + assert results == coros + + +@pytest.mark.asyncio +@pytest.mark.parametrize("closed", [True, False]) +@pytest.mark.parametrize("nolimit", [True, False]) +@pytest.mark.parametrize("decrement", [None, True, False]) +async def test_aio_concurrent_on_task_complete(patches, closed, nolimit, decrement): + concurrent = aio.concurrent(["CORO"]) + patched = patches( + ("concurrent.exit_on_completion", dict(new_callable=AsyncMock)), + ("concurrent.closed", dict(new_callable=PropertyMock)), + ("concurrent.out", dict(new_callable=PropertyMock)), + ("concurrent.running_queue", dict(new_callable=PropertyMock)), + ("concurrent.nolimit", dict(new_callable=PropertyMock)), + ("concurrent.sem", dict(new_callable=PropertyMock)), + prefix="tools.base.aio") + kwargs = {} + if decrement is not None: + kwargs["decrement"] = decrement + + with patched as (m_complete, m_closed, m_out, m_running_queue, m_nolimit, m_sem): + m_nolimit.return_value = nolimit + m_closed.return_value = closed + m_out.return_value.put = AsyncMock() + assert not await 
concurrent.on_task_complete("RESULT", **kwargs) + + if closed: + assert not m_complete.called + assert not m_nolimit.called + assert not m_sem.called + assert not m_running_queue.called + assert not m_out.return_value.put.called + return + + assert ( + list(m_out.return_value.put.call_args) + == [("RESULT", ), {}]) + if nolimit: + assert not m_sem.return_value.release.called + else: + assert ( + list(m_sem.return_value.release.call_args) + == [(), {}]) + if decrement or decrement is None: + assert ( + list(m_running_queue.return_value.get_nowait.call_args) + == [(), {}]) + else: + assert not m_running_queue.return_value.get_nowait.called + assert ( + list(m_complete.call_args) + == [(), {}]) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("result_count", range(0, 7)) +@pytest.mark.parametrize("error", [True, False]) +@pytest.mark.parametrize("should_error", [True, False]) +async def test_aio_concurrent_output(patches, result_count, error, should_error): + concurrent = aio.concurrent(["CORO"]) + patched = patches( + "concurrent.should_error", + ("concurrent.cancel", dict(new_callable=AsyncMock)), + ("concurrent.close", dict(new_callable=AsyncMock)), + ("concurrent.out", dict(new_callable=PropertyMock)), + prefix="tools.base.aio") + + exception = Exception() + + class DummyQueue: + _running_queue = 0 + + async def get(self): + if result_count == 0: + return aio._sentinel + if result_count > self._running_queue: + self._running_queue += 1 + if error and result_count == self._running_queue: + return exception + return f"RESULT {self._running_queue}" + return aio._sentinel + + def should_error(self, result): + return error and should_error and (result_count == self._running_queue) + + q = DummyQueue() + results = [] + + with patched as (m_error, m_cancel, m_close, m_out): + m_out.return_value.get.side_effect = q.get + m_error.side_effect = q.should_error + if result_count and error and should_error: + with pytest.raises(Exception): + async for result in 
concurrent.output(): + results.append(result) + else: + async for result in concurrent.output(): + results.append(result) + + if result_count and error and should_error: + # last one errored + assert results == [f"RESULT {i}" for i in range(1, result_count)] + assert ( + list(list(c) for c in m_error.call_args_list) + == [[(result,), {}] for result in results] + [[(exception,), {}]]) + assert ( + list(m_cancel.call_args) + == [(), {}]) + assert not m_close.called + return + + assert ( + list(list(c) for c in m_close.call_args_list) + == [[(), {}]]) + assert not m_cancel.called + + if not result_count: + assert results == [] + return + + if error: + assert ( + results + == [f"RESULT {i}" for i in range(1, result_count)] + [exception]) + return + # all results returned correctly + assert results == [f"RESULT {i}" for i in range(1, result_count + 1)] + + +@pytest.mark.asyncio +@pytest.mark.parametrize("closed_before", [True, False]) +@pytest.mark.parametrize("closed_after", [True, False]) +@pytest.mark.parametrize("nolimit", [True, False]) +async def test_aio_concurrent_ready(patches, closed_before, closed_after, nolimit): + concurrent = aio.concurrent(["CORO"]) + patched = patches( + ("concurrent.closed", dict(new_callable=PropertyMock)), + ("concurrent.nolimit", dict(new_callable=PropertyMock)), + ("concurrent.sem", dict(new_callable=PropertyMock)), + prefix="tools.base.aio") + + class DummyCloser: + order_mock = MagicMock() + close_calls = 0 + + async def _acquire(self): + self.order_mock("ACQUIRE") + + def _nolimit(self): + self.order_mock("NOLIMIT") + return nolimit + + def _closed(self): + self.order_mock("CLOSED") + self.close_calls += 1 + if self.close_calls == 1: + return closed_before + if self.close_calls == 2: + return closed_after + + closer = DummyCloser() + + with patched as (m_closed, m_nolimit, m_sem): + m_nolimit.side_effect = closer._nolimit + m_closed.side_effect = closer._closed + m_sem.return_value.acquire = closer._acquire + assert ( + await 
concurrent.ready() + == ((not closed_before and not closed_after) + if not nolimit else not closed_before)) + + if closed_before: + assert not m_nolimit.called + assert not m_sem.called + assert ( + list(list(c) for c in closer.order_mock.call_args_list) + == [[('CLOSED',), {}]]) + return + if nolimit: + assert not m_sem.called + assert ( + list(list(c) for c in closer.order_mock.call_args_list) + == [[('CLOSED',), {}], + [('NOLIMIT',), {}]]) + return + assert ( + list(list(c) for c in closer.order_mock.call_args_list) + == [[('CLOSED',), {}], + [('NOLIMIT',), {}], + [('ACQUIRE',), {}], + [('CLOSED',), {}]]) + + +def test_aio_concurrent_remember_task(): + concurrent = aio.concurrent(["CORO"]) + concurrent._running = MagicMock() + task = MagicMock() + assert not concurrent.remember_task(task) + assert ( + list(concurrent._running.append.call_args) + == [(task, ), {}]) + assert ( + list(task.add_done_callback.call_args) + == [(concurrent.forget_task, ), {}]) + + +@pytest.mark.parametrize("result", [None, "RESULT", aio.ConcurrentError, aio.ConcurrentExecutionError, aio.ConcurrentIteratorError]) +@pytest.mark.parametrize("yield_exceptions", [True, False]) +def test_aio_concurrent_should_error(result, yield_exceptions): + concurrent = aio.concurrent(["CORO"]) + concurrent.yield_exceptions = yield_exceptions + + if isinstance(result, type) and issubclass(result, BaseException): + result = result() + + assert ( + concurrent.should_error(result) + == ((isinstance(result, aio.ConcurrentIteratorError) + or isinstance(result, aio.ConcurrentError) and not yield_exceptions))) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("coros", range(0, 7)) +@pytest.mark.parametrize("unready", range(0, 8)) +@pytest.mark.parametrize("valid_raises", [None, Exception, aio.ConcurrentError]) +@pytest.mark.parametrize("iter_errors", [True, False]) +async def test_aio_concurrent_submit(patches, coros, unready, valid_raises, iter_errors): + concurrent = aio.concurrent(["CORO"]) + patched = 
patches( + "isinstance", + "concurrent.validate_coro", + ("concurrent.exit_on_completion", dict(new_callable=AsyncMock)), + ("concurrent.create_task", dict(new_callable=AsyncMock)), + ("concurrent.on_task_complete", dict(new_callable=AsyncMock)), + ("concurrent.ready", dict(new_callable=AsyncMock)), + ("concurrent.coros", dict(new_callable=PropertyMock)), + ("concurrent.out", dict(new_callable=PropertyMock)), + ("concurrent.submission_lock", dict(new_callable=PropertyMock)), + prefix="tools.base.aio") + m_order = MagicMock() + + class DummyReady: + counter = 0 + + def ready(self): + if self.counter >= unready: + self.counter += 1 + return False + self.counter += 1 + return True + + ready = DummyReady() + + async def acquire(): + m_order("ACQUIRE") + + def release(): + m_order("RELEASE") + + corolist = [MagicMock() for coro in range(1, coros)] + + async def iter_coros(): + for coro in corolist: + m_order(coro) + yield coro + + valid_errors = ( + (valid_raises == Exception) + and coros > 1 + and not unready == 0 + and not iter_errors) + + with patched as (m_inst, m_valid, m_exit, m_create, m_complete, m_ready, m_coros, m_out, m_lock): + m_out.return_value.put = AsyncMock() + m_inst.return_value = iter_errors + m_valid.side_effect = valid_raises + m_ready.side_effect = ready.ready + m_coros.return_value = iter_coros() + m_lock.return_value.acquire.side_effect = acquire + m_lock.return_value.release.side_effect = release + + if valid_errors: + with pytest.raises(Exception): + await concurrent.submit() + else: + assert not await concurrent.submit() + + if valid_errors: + assert not m_lock.return_value.called + assert not m_exit.called + else: + assert ( + list(m_lock.return_value.release.call_args) + == [(), {}]) + assert ( + list(m_exit.call_args) + == [(), {}]) + + if coros < 2: + assert not m_valid.called + assert not m_inst.called + assert not m_complete.called + assert not m_create.called + assert not m_ready.called + assert not m_out.return_value.put.called + 
return + + should_close_coro = ( + not iter_errors + and not valid_errors + and (len(corolist) > unready)) + + if should_close_coro: + assert corolist[unready].close.called + else: + assert not any(coro.close.called for coro in corolist) + + if iter_errors: + assert ( + list(list(c) for c in m_out.return_value.put.call_args_list) + == [[(corolist[0], ), {}]]) + assert ( + list(list(c) for c in m_inst.call_args_list) + == [[(corolist[0], aio.ConcurrentIteratorError), {}]]) + assert not m_ready.called + assert not m_valid.called + assert not m_complete.called + assert not m_create.called + return + + if valid_errors: + assert ( + list(list(c) for c in m_inst.call_args_list) + == [[(corolist[0], aio.ConcurrentIteratorError), {}]]) + assert ( + list(list(c) for c in m_ready.call_args_list) + == [[(), {}]]) + assert ( + list(list(c) for c in m_valid.call_args_list) + == [[(corolist[0], ), {}]]) + assert not m_complete.called + assert not m_create.called + assert ( + list(list(c) for c in m_order.call_args_list) + == ([[('ACQUIRE',), {}], + [(corolist[0],), {}]])) + return + + assert not m_out.return_value.put.called + assert ( + list(list(c) for c in m_ready.call_args_list) + == [[(), {}]] * min(coros - 1, unready + 1 or 1)) + assert ( + list(list(c) for c in m_valid.call_args_list) + == [[(corolist[i - 1], ), {}] for i in range(1, min(coros, unready + 1))]) + assert ( + list(list(c) for c in m_order.call_args_list) + == ([[('ACQUIRE',), {}]] + + [[(corolist[i - 1],), {}] for i in range(1, min(coros, unready + 2))] + + [[('RELEASE',), {}]])) + if valid_raises: + assert len(m_complete.call_args_list) == max(min(coros - 1, unready), 0) + for c in m_complete.call_args_list: + error = list(c)[0][0] + assert isinstance(error, aio.ConcurrentError) + assert ( + list(c) + == [(error,), {'decrement': False}]) + assert not m_create.called + return + assert not m_complete.called + assert ( + list(list(c) for c in m_create.call_args_list) + == [[(corolist[i - 1],), {}] for i in 
range(1, min(coros, unready + 1))]) + + +class OtherException(BaseException): + pass + + +@pytest.mark.asyncio +@pytest.mark.parametrize("raises", [None, Exception, OtherException]) +async def test_aio_concurrent_task(patches, raises): + concurrent = aio.concurrent(["CORO"]) + patched = patches( + "concurrent.on_task_complete", + prefix="tools.base.aio") + + if raises: + exception = raises("AN ERROR OCCURRED") + + async def coro(): + if raises: + raise exception + return 23 + + with patched as (m_complete, ): + assert not await concurrent.task(coro()) + + result = m_complete.call_args[0][0] + + if not raises: + assert result == 23 + else: + assert isinstance(result, aio.ConcurrentExecutionError) + assert result.args[0] is exception + assert ( + list(m_complete.call_args) + == [(result, ), {}]) + + +@pytest.mark.parametrize("awaitable", [True, False]) +@pytest.mark.parametrize( + "state", + [inspect.CORO_CLOSED, + inspect.CORO_CREATED, + inspect.CORO_RUNNING, + inspect.CORO_SUSPENDED]) +def test_aio_concurrent_validate_coro(patches, awaitable, state): + concurrent = aio.concurrent(["CORO"]) + patched = patches( + "inspect.getcoroutinestate", + prefix="tools.base.aio") + + # we cant patch inspect.isawaitable without fooing unittest + def unawaitable(): + pass + + async def coro(): + pass + + awaits = ( + coro() + if awaitable + else unawaitable) + + with patched as (m_state, ): + m_state.return_value = state + + if awaitable and state == inspect.CORO_CREATED: + assert not concurrent.validate_coro(awaits) + else: + with pytest.raises(aio.ConcurrentError) as e: + concurrent.validate_coro(awaits) + + if not awaitable: + assert ( + e.value.args[0] + == f'Provided input was not a coroutine: {awaits}') + assert not m_state.called + return + + awaits.close() + assert ( + list(m_state.call_args) + == [(awaits, ), {}]) + + if state != inspect.CORO_CREATED: + assert ( + e.value.args[0] + == f'Provided coroutine has already been fired: {awaits}') + + +async def aiter(items): + 
for item in items: + yield item + + +@pytest.mark.asyncio +@pytest.mark.parametrize("limit", list(range(0, 4)) + [-1]) +@pytest.mark.parametrize("yield_exceptions", [None, True, False]) +@pytest.mark.parametrize("iter_type", [list, tuple, set, iter, aiter]) +@pytest.mark.parametrize( + "coros", + [["HAPPY"], + ["HAPPY"] * 2 + ["SAD"] + ["HAPPY"] * 3, + ["HAPPY"] * 7, + ["HAPPY"] * 2 + ["RAISE"] + ["HAPPY"] * 3, + ["SAD"] * 2 + ["HAPPY"] * 3, + ["HAPPY"] * 2 + ["CABBAGE"] + ["HAPPY"] * 3, + ["HAPPY"] * 2 + ["FIRED"] + ["HAPPY"] * 3]) +async def test_aio_concurrent_integration(limit, yield_exceptions, iter_type, coros): + # This is an integration/black-box test that only measures inputs/outputs and the + # effect of using the utility with them on them + + # `HAPPY` - a happy coroutine ready to be fired + # `SAD` - a sad coroutine that will raise a `SadError` when fired + # `FIRED` - a coroutine that has already been fired + # `RAISE` - raise an error in the iterator + # `CABBAGE` - leafy vegetable of the brassica family + + tasks_at_the_beginning = len(asyncio.all_tasks()) + + kwargs = {} + + if yield_exceptions is not None: + kwargs["yield_exceptions"] = yield_exceptions + + if limit: + kwargs["limit"] = limit + + class SadError(Exception): + pass + + class LoopError(Exception): + pass + + async def happy(): + # this makes happy return after sad (ie errors) and tests the ordering of responses + # and the handling of pending tasks when errors occur + await asyncio.sleep(.01) + return "HAPPY" + + fired = happy() + await fired + + async def sad(): + raise SadError + + def coro_gen(): + for coro in coros: + if coro == "RAISE": + raise LoopError() + if coro == "HAPPY": + yield happy() + elif coro == "SAD": + yield sad() + elif coro == "FIRED": + yield fired + else: + yield coro + + all_good = all(coro == "HAPPY" for coro in coros) + iter_raises = any(coro == "RAISE" for coro in coros) + + if iter_raises: + # we can only test the generator types for errors + # during 
iteration - ie if `list`, `tuple` etc contain + # errors, they would raise now. + if not iter_type in [iter, aiter]: + return + generated_coros = coro_gen() + else: + generated_coros = list(coro_gen()) + expected_err_index = next((i for i, x in enumerate(coros) if x != 'HAPPY'), None) + + results = [] + concurrent = aio.concurrent(iter_type(generated_coros), **kwargs) + + if (not all_good and not yield_exceptions) or iter_raises: + if iter_raises: + with pytest.raises(aio.ConcurrentIteratorError) as e: + async for result in concurrent: + results.append(result) + assert isinstance(e.value.args[0], LoopError) + return + else: + coro_fail = ( + any(not inspect.isawaitable(coro) for coro in generated_coros) + or any(coro == "FIRED" for coro in coros)) + if coro_fail: + with pytest.raises(aio.ConcurrentError): + async for result in concurrent: + results.append(result) + else: + with pytest.raises(aio.ConcurrentExecutionError): + async for result in concurrent: + results.append(result) + + # for iterators there is no way of knowing that more awaitables were + # on the way when failure happened, so these need to be closed here + if iter_type in (iter, aiter): + for coro in generated_coros[expected_err_index:]: + if not isinstance(coro, str): + coro.close() + + if limit < 1 and iter_type != set: + # as all jobs are submitted concurrently (the default is higher than + # tne number of test jobs, and -1 forces no limit) and as sad is + # faster than happy, we get no results + assert results == [] + elif iter_type != set: + # because the ordering on sets is indeterminate the results are unpredictable + # therefore the easiest thing is to just exclude them from this test + assert results == coros[:expected_err_index - (expected_err_index % limit)] + + # this can probs be removed, i think it was caused by unhandled GeneratorExit + await asyncio.sleep(.001) + gc.collect() + assert len(asyncio.all_tasks()) == tasks_at_the_beginning + return + + async for result in concurrent: + 
results.append(result) + + assert len(asyncio.all_tasks()) == tasks_at_the_beginning + + def mangled_results(): + # replace the errors with the test strings + for result in results: + if isinstance(result, aio.ConcurrentExecutionError): + yield "SAD" + elif isinstance(result, aio.ConcurrentError): + if "CABBAGE" in result.args[0]: + yield "CABBAGE" + else: + yield "FIRED" + else: + yield result + + if expected_err_index: + err_index = ( + expected_err_index + if limit == 0 + else expected_err_index - (expected_err_index % limit)) + + if expected_err_index and err_index >= limit and limit not in [0, -1]: + # the error is at the beginning of whichever batch its in + expected = ["HAPPY"] * 6 + expected[err_index] = coros[err_index] + else: + # the error is in the first batch so its at the beginning + expected = [x for x in list(coros) if x != "HAPPY"] + [x for x in list(coros) if x == "HAPPY"] + + if iter_type == set: + assert set(expected) == set(mangled_results()) + else: + assert expected == list(mangled_results()) diff --git a/tools/base/tests/test_checker.py b/tools/base/tests/test_checker.py index 9b18a023187c6..e3c7d3c155e8c 100644 --- a/tools/base/tests/test_checker.py +++ b/tools/base/tests/test_checker.py @@ -3,8 +3,9 @@ import pytest -from tools.base.checker import AsyncChecker, BazelChecker, Checker, CheckerSummary, ForkingChecker -from tools.base.runner import BazelRunner, ForkingRunner +from tools.base.checker import ( + AsyncChecker, BaseChecker, BazelChecker, Checker, CheckerSummary) +from tools.base.runner import BazelRunner class DummyChecker(Checker): @@ -13,12 +14,6 @@ def __init__(self): self.args = PropertyMock() -class DummyForkingChecker(ForkingChecker): - - def __init__(self): - self.args = PropertyMock() - - class DummyBazelChecker(BazelChecker): def __init__(self): @@ -50,7 +45,7 @@ def test_checker_constructor(): == [('path1', 'path2', 'path3'), {}]) assert checker.summary_class == CheckerSummary - assert checker.active_check is None + 
assert checker.active_check == "" assert "active_check" not in checker.__dict__ @@ -129,16 +124,16 @@ class DummyError(Exception): pass checker = Checker("path1", "path2", "path3") patched = patches( + "pathlib", ("Checker.args", dict(new_callable=PropertyMock)), ("Checker.parser", dict(new_callable=PropertyMock)), - "os.path.isdir", prefix="tools.base.checker") - with patched as (m_args, m_parser, m_isdir): + with patched as (m_plib, m_args, m_parser): m_parser.return_value.error = DummyError m_args.return_value.path = path m_args.return_value.paths = paths - m_isdir.return_value = isdir + m_plib.Path.return_value.is_dir.return_value = isdir if not path and not paths: with pytest.raises(DummyError) as e: checker.path @@ -152,12 +147,15 @@ class DummyError(Exception): e.value.args == ('Incorrect path: `path` must be a directory, set either as first arg or with --path',)) else: - assert checker.path == path or paths[0] + assert checker.path == m_plib.Path.return_value + assert ( + list(m_plib.Path.call_args) + == [(path or paths[0],), {}]) assert "path" in checker.__dict__ if path or paths: assert ( - list(m_isdir.call_args) - == [(path or paths[0],), {}]) + list(m_plib.Path.return_value.is_dir.call_args) + == [(), {}]) @pytest.mark.parametrize("paths", [[], ["path1", "path2"]]) @@ -345,7 +343,7 @@ def test_checker_add_arguments(patches): 'help': 'Paths to check. 
At least one path must be specified, or the `path` argument should be provided'}]]) -TEST_ERRORS = ( +TEST_ERRORS: tuple = ( {}, dict(myerror=[]), dict(myerror=["a", "b", "c"]), @@ -416,7 +414,7 @@ def test_checker_exit(patches): == [('exiting', ['Keyboard exit']), {'log_type': 'fatal'}]) -TEST_CHECKS = ( +TEST_CHECKS: tuple = ( None, (), ("check1", ), @@ -477,7 +475,7 @@ def test_checker_on_check_run(patches, errors, warnings, exiting): m_exit.return_value = exiting assert not checker.on_check_run(check) - assert checker.active_check is None + assert checker.active_check == "" if exiting: assert not m_log.called @@ -598,7 +596,7 @@ def test_checker_run(patches, raises): == [(), {}]) -TEST_WARNS = ( +TEST_WARNS: tuple = ( {}, dict(mywarn=[]), dict(mywarn=["a", "b", "c"]), @@ -630,7 +628,7 @@ def test_checker_warn(patches, log, warns): assert not m_log.return_value.warn.called -TEST_SUCCESS = ( +TEST_SUCCESS: tuple = ( {}, dict(mysuccess=[]), dict(mysuccess=["a", "b", "c"]), @@ -702,7 +700,7 @@ def test_checker_summary_print_summary(patches): assert m_status.called -TEST_SECTIONS = ( +TEST_SECTIONS: tuple = ( ("MSG1", ["a", "b", "c"]), ("MSG2", []), ("MSG3", None)) @@ -811,14 +809,6 @@ def _extra(prob): == expected) -# ForkingChecker test - -def test_forkingchecker_constructor(): - checker = DummyForkingChecker() - assert isinstance(checker, ForkingRunner) - assert isinstance(checker, Checker) - - # BazelChecker test def test_bazelchecker_constructor(): @@ -831,7 +821,7 @@ def test_bazelchecker_constructor(): def test_asynchecker_constructor(): checker = AsyncChecker() - assert isinstance(checker, Checker) + assert isinstance(checker, BaseChecker) @pytest.mark.parametrize("raises", [None, KeyboardInterrupt, Exception]) @@ -840,7 +830,7 @@ def test_asynchecker_run(patches, raises): patched = patches( "asyncio", - "Checker.exit", + "BaseChecker.exit", ("AsyncChecker._run", dict(new_callable=MagicMock)), ("AsyncChecker.on_checks_complete", 
dict(new_callable=MagicMock)), prefix="tools.base.checker") @@ -892,7 +882,7 @@ def test_asynchecker_run(patches, raises): async def test_asynchecker_on_check_begin(patches): checker = AsyncChecker() patched = patches( - "Checker.on_check_begin", + "BaseChecker.on_check_begin", prefix="tools.base.checker") with patched as (m_super, ): @@ -907,7 +897,7 @@ async def test_asynchecker_on_check_begin(patches): async def test_asynchecker_on_check_run(patches): checker = AsyncChecker() patched = patches( - "Checker.on_check_run", + "BaseChecker.on_check_run", prefix="tools.base.checker") with patched as (m_super, ): @@ -922,7 +912,7 @@ async def test_asynchecker_on_check_run(patches): async def test_asynchecker_on_checks_begin(patches): checker = AsyncChecker() patched = patches( - "Checker.on_checks_begin", + "BaseChecker.on_checks_begin", prefix="tools.base.checker") with patched as (m_super, ): @@ -938,7 +928,7 @@ async def test_asynchecker_on_checks_complete(patches): checker = AsyncChecker() patched = patches( - "Checker.on_checks_complete", + "BaseChecker.on_checks_complete", prefix="tools.base.checker") with patched as (m_complete, ): @@ -976,8 +966,8 @@ class SomeError(Exception): checker = AsyncCheckerWithChecks() patched = patches( - "Checker.log", - "Checker.get_checks", + "BaseChecker.log", + "BaseChecker.get_checks", "AsyncChecker.on_checks_begin", "AsyncChecker.on_check_begin", "AsyncChecker.on_check_run", diff --git a/tools/base/tests/test_runner.py b/tools/base/tests/test_runner.py index a1e4e4fd434e8..4b88cda46c080 100644 --- a/tools/base/tests/test_runner.py +++ b/tools/base/tests/test_runner.py @@ -1,7 +1,7 @@ import importlib import logging import sys -from unittest.mock import MagicMock, patch, PropertyMock +from unittest.mock import AsyncMock, MagicMock, patch, PropertyMock import pytest @@ -13,7 +13,7 @@ importlib.reload(runner) -class DummyRunner(runner.Runner): +class DummyRunner(runner.BaseRunner): def __init__(self): self.args = PropertyMock() 
@@ -25,7 +25,7 @@ def __init__(self): self.args = PropertyMock() -class Error1(Exception): +class OneError(Exception): def __str__(self): return "" @@ -33,13 +33,17 @@ def __str__(self): pass -class Error2(Exception): +class TwoError(Exception): pass def _failing_runner(errors): - class DummyFailingRunner(object): + class DummyFailingRunner: + # this dummy runner calls the _runner mock + # when its run/run_async methods are called + # and optionally raises some type of error + # to ensure they are caught as expected log = PropertyMock() _runner = MagicMock() @@ -54,22 +58,31 @@ def run(self, *args, **kwargs): raise self.raises("AN ERROR OCCURRED") return result + @runner.catches(errors) + async def run_async(self, *args, **kwargs): + result = self._runner(*args, **kwargs) + if self.raises: + raise self.raises("AN ERROR OCCURRED") + return result + return DummyFailingRunner +@pytest.mark.asyncio +@pytest.mark.parametrize("async_fun", [True, False]) @pytest.mark.parametrize( "errors", - [Error1, (Error1, Error2)]) + [OneError, (OneError, TwoError)]) @pytest.mark.parametrize( "raises", - [None, Error1, Error2]) + [None, OneError, TwoError]) @pytest.mark.parametrize( "args", [(), ("ARG1", "ARG2")]) @pytest.mark.parametrize( "kwargs", [{}, dict(key1="VAL1", key2="VAL2")]) -def test_catches(errors, raises, args, kwargs): +async def test_catches(errors, async_fun, raises, args, kwargs): run = _failing_runner(errors)(raises) should_fail = ( raises @@ -78,12 +91,15 @@ def test_catches(errors, raises, args, kwargs): or (isinstance(errors, tuple) and raises in errors))) + assert run.run.__wrapped__.__catches__ == errors + assert run.run_async.__wrapped__.__catches__ == errors + if should_fail: result = 1 with pytest.raises(raises): - run.run(*args, **kwargs) + run.run(*args, **kwargs) if not async_fun else await run.run_async(*args, **kwargs) else: - result = run.run(*args, **kwargs) + result = run.run(*args, **kwargs) if not async_fun else await run.run_async(*args, 
**kwargs) assert ( list(run._runner.call_args) @@ -108,18 +124,84 @@ def test_catches(errors, raises, args, kwargs): assert result == run._runner.return_value -def test_runner_constructor(): - run = runner.Runner("path1", "path2", "path3") +def _cleanup_runner(async_fun, raises): + + class DummyCleanupRunner: + # this dummy runner calls the _runner mock + # when its run/async_fun methods are called + # and optionally raises some type of error + # to ensure they are caught as expected + + log = PropertyMock() + _runner = MagicMock() + + @runner.cleansup + def run(self, *args, **kwargs): + result = self._runner(*args, **kwargs) + if raises: + raise Exception("AN ERROR OCCURRED") + return result + + @runner.cleansup + async def run_async(self, *args, **kwargs): + result = self._runner(*args, **kwargs) + if raises: + raise Exception("AN ERROR OCCURRED") + return result + + return DummyCleanupRunner() + + +@pytest.mark.asyncio +@pytest.mark.parametrize("async_fun", [True, False]) +@pytest.mark.parametrize("raises", [True, False]) +async def test_cleansup(async_fun, raises): + run = _cleanup_runner(async_fun, raises) + args = [f"ARG{i}" for i in range(0, 3)] + kwargs = {f"K{i}": f"V{i}" for i in range(0, 3)} + + assert run.run.__wrapped__.__cleansup__ is True + assert run.run_async.__wrapped__.__cleansup__ is True + + if async_fun: + run.cleanup = AsyncMock() + if raises: + with pytest.raises(Exception): + await run.run_async(*args, **kwargs) + else: + assert ( + await run.run_async(*args, **kwargs) + == run._runner.return_value) + else: + run.cleanup = MagicMock() + if raises: + with pytest.raises(Exception): + run.run(*args, **kwargs) + else: + assert ( + run.run(*args, **kwargs) + == run._runner.return_value) + + assert ( + list(run._runner.call_args) + == [tuple(args), kwargs]) + assert ( + list(run.cleanup.call_args) + == [(), {}]) + + +def test_base_runner_constructor(): + run = runner.BaseRunner("path1", "path2", "path3") assert run._args == ("path1", "path2", 
"path3") assert run.log_field_styles == runner.LOG_FIELD_STYLES assert run.log_level_styles == runner.LOG_LEVEL_STYLES assert run.log_fmt == runner.LOG_FMT -def test_runner_args(): - run = runner.Runner("path1", "path2", "path3") +def test_base_runner_args(): + run = runner.BaseRunner("path1", "path2", "path3") parser_mock = patch( - "tools.base.runner.Runner.parser", + "tools.base.runner.BaseRunner.parser", new_callable=PropertyMock) with parser_mock as m_parser: @@ -134,10 +216,10 @@ def test_runner_args(): assert "args" in run.__dict__ -def test_runner_extra_args(): - run = runner.Runner("path1", "path2", "path3") +def test_base_runner_extra_args(): + run = runner.BaseRunner("path1", "path2", "path3") parser_mock = patch( - "tools.base.runner.Runner.parser", + "tools.base.runner.BaseRunner.parser", new_callable=PropertyMock) with parser_mock as m_parser: @@ -152,18 +234,18 @@ def test_runner_extra_args(): assert "extra_args" in run.__dict__ -def test_runner_log(patches): - run = runner.Runner("path1", "path2", "path3") +def test_base_runner_log(patches): + run = runner.BaseRunner("path1", "path2", "path3") patched = patches( "logging.getLogger", "LogFilter", "coloredlogs", "verboselogs", - ("Runner.log_level", dict(new_callable=PropertyMock)), - ("Runner.log_level_styles", dict(new_callable=PropertyMock)), - ("Runner.log_field_styles", dict(new_callable=PropertyMock)), - ("Runner.log_fmt", dict(new_callable=PropertyMock)), - ("Runner.name", dict(new_callable=PropertyMock)), + ("BaseRunner.log_level", dict(new_callable=PropertyMock)), + ("BaseRunner.log_level_styles", dict(new_callable=PropertyMock)), + ("BaseRunner.log_field_styles", dict(new_callable=PropertyMock)), + ("BaseRunner.log_fmt", dict(new_callable=PropertyMock)), + ("BaseRunner.name", dict(new_callable=PropertyMock)), prefix="tools.base.runner") with patched as patchy: @@ -192,11 +274,11 @@ def test_runner_log(patches): assert "log" in run.__dict__ -def test_runner_log_level(patches): - run = 
runner.Runner("path1", "path2", "path3") +def test_base_runner_log_level(patches): + run = runner.BaseRunner("path1", "path2", "path3") patched = patches( "dict", - ("Runner.args", dict(new_callable=PropertyMock)), + ("BaseRunner.args", dict(new_callable=PropertyMock)), prefix="tools.base.runner") with patched as (m_dict, m_args): assert run.log_level == m_dict.return_value.__getitem__.return_value @@ -210,17 +292,17 @@ def test_runner_log_level(patches): assert "log_level" in run.__dict__ -def test_runner_name(): +def test_base_runner_name(): run = DummyRunner() assert run.name == run.__class__.__name__ assert "name" not in run.__dict__ -def test_runner_parser(patches): - run = runner.Runner("path1", "path2", "path3") +def test_base_runner_parser(patches): + run = runner.BaseRunner("path1", "path2", "path3") patched = patches( "argparse.ArgumentParser", - "Runner.add_arguments", + "BaseRunner.add_arguments", prefix="tools.base.runner") with patched as (m_parser, m_add_args): assert run.parser == m_parser.return_value @@ -234,24 +316,26 @@ def test_runner_parser(patches): assert "parser" in run.__dict__ -def test_checker_path(): - run = runner.Runner("path1", "path2", "path3") - cwd_mock = patch("tools.base.runner.os.getcwd") +def test_base_runner_path(patches): + run = runner.BaseRunner("path1", "path2", "path3") + patched = patches( + "pathlib", + prefix="tools.base.runner") - with cwd_mock as m_cwd: - assert run.path == m_cwd.return_value + with patched as (m_plib, ): + assert run.path == m_plib.Path.return_value assert ( - list(m_cwd.call_args) - == [(), {}]) + list(m_plib.Path.call_args) + == [(".", ), {}]) -def test_checker_stdout(patches): - run = runner.Runner("path1", "path2", "path3") +def test_base_runner_stdout(patches): + run = runner.BaseRunner("path1", "path2", "path3") patched = patches( "logging", - ("Runner.log_level", dict(new_callable=PropertyMock)), + ("BaseRunner.log_level", dict(new_callable=PropertyMock)), prefix="tools.base.runner") with 
patched as (m_log, m_level): @@ -277,8 +361,34 @@ def test_checker_stdout(patches): == [(m_log.StreamHandler.return_value,), {}]) -def test_runner_add_arguments(): - run = runner.Runner("path1", "path2", "path3") +@pytest.mark.parametrize("missing", [True, False]) +def test_base_runner_tempdir(patches, missing): + run = runner.BaseRunner() + patched = patches( + "tempfile", + ("BaseRunner.log", dict(new_callable=PropertyMock)), + ("BaseRunner._missing_cleanup", dict(new_callable=PropertyMock)), + prefix="tools.base.runner") + + with patched as (m_tmp, m_log, m_missing): + m_missing.return_value = missing + assert run.tempdir == m_tmp.TemporaryDirectory.return_value + + if missing: + assert ( + list(m_log.return_value.warning.call_args) + == [("Tempdir created but instance has a `run` method which is not decorated with `@runner.cleansup`", ), {}]) + else: + assert not m_log.called + + assert ( + list(m_tmp.TemporaryDirectory.call_args) + == [(), {}]) + assert "tempdir" in run.__dict__ + + +def test_base_runner_add_arguments(): + run = runner.BaseRunner("path1", "path2", "path3") parser = MagicMock() assert run.add_arguments(parser) is None @@ -291,12 +401,64 @@ def test_runner_add_arguments(): ]) +@pytest.mark.parametrize("has_fun", [True, False]) +@pytest.mark.parametrize("is_wrapped", [True, False]) +@pytest.mark.parametrize("cleansup", [True, False]) +def test_base_runner__missing_cleanup(has_fun, is_wrapped, cleansup): + + def _runner_factory(): + if not has_fun: + return runner.BaseRunner() + + class _Wrap: + if cleansup: + __cleansup__ = True + + class _Wrapper: + if is_wrapped: + __wrapped__ = _Wrap() + + class DummyRunner(runner.BaseRunner): + run = _Wrapper() + + return DummyRunner() + + run = _runner_factory() + + assert ( + run._missing_cleanup + == (has_fun + and not (is_wrapped and cleansup))) + assert "_missing_cleanup" not in run.__dict__ + + +@pytest.mark.parametrize("cached", [True, False]) +def test_base_runner__cleanup_tempdir(patches, cached): + 
run = runner.BaseRunner() + patched = patches( + ("BaseRunner.tempdir", dict(new_callable=PropertyMock)), + prefix="tools.base.runner") + if cached: + run.__dict__["tempdir"] = "TEMPDIR" + + with patched as (m_temp, ): + assert not run._cleanup_tempdir() + + if cached: + assert ( + list(m_temp.return_value.cleanup.call_args) + == [(), {}]) + else: + assert not m_temp.called + assert "tempdir" not in run.__dict__ + + # LogFilter tests @pytest.mark.parametrize("level", [logging.DEBUG, logging.INFO, logging.WARN, logging.ERROR, None, "giraffe"]) -def test_runner_log_filter(level): +def test_base_runner_log_filter(level): logfilter = runner.LogFilter() - class DummyRecord(object): + class DummyRecord: levelno = level if level in [logging.DEBUG, logging.INFO]: @@ -305,6 +467,69 @@ class DummyRecord(object): assert not logfilter.filter(DummyRecord()) +def test_runner_constructor(patches): + patched = patches( + "BaseRunner.__init__", + prefix="tools.base.runner") + args = [f"ARG{i}" for i in range(0, 3)] + kwargs = {f"K{i}": f"V{i}" for i in range(0, 3)} + + with patched as (m_super, ): + m_super.return_value = None + run = runner.Runner(*args, **kwargs) + + assert isinstance(run, runner.BaseRunner) + assert ( + list(m_super.call_args) + == [tuple(args), kwargs]) + + +def test_runner_cleanup(patches): + run = runner.Runner() + patched = patches( + "Runner._cleanup_tempdir", + prefix="tools.base.runner") + + with patched as (m_temp, ): + assert not run.cleanup() + + assert ( + list(m_temp.call_args) + == [(), {}]) + + +def test_async_runner_constructor(patches): + patched = patches( + "BaseRunner.__init__", + prefix="tools.base.runner") + args = [f"ARG{i}" for i in range(0, 3)] + kwargs = {f"K{i}": f"V{i}" for i in range(0, 3)} + + with patched as (m_super, ): + m_super.return_value = None + run = runner.AsyncRunner(*args, **kwargs) + + assert isinstance(run, runner.BaseRunner) + assert ( + list(m_super.call_args) + == [tuple(args), kwargs]) + + +@pytest.mark.asyncio 
+async def test_async_runner_cleanup(patches): + run = runner.AsyncRunner() + patched = patches( + "AsyncRunner._cleanup_tempdir", + prefix="tools.base.runner") + + with patched as (m_temp, ): + assert not await run.cleanup() + + assert ( + list(m_temp.call_args) + == [(), {}]) + + # BazelAdapter tests def test_bazeladapter_constructor(): @@ -436,7 +661,7 @@ def test_forkingadapter_subproc_run(patches, args, cwd, capture_output): adapter = runner.ForkingAdapter(DummyRunner()) patched = patches( "subprocess.run", - ("Runner.path", dict(new_callable=PropertyMock)), + ("BaseRunner.path", dict(new_callable=PropertyMock)), prefix="tools.base.runner") with patched as (m_run, m_path): diff --git a/tools/base/tests/test_utils.py b/tools/base/tests/test_utils.py index dc403ec999eb4..5ea95efb4ac3a 100644 --- a/tools/base/tests/test_utils.py +++ b/tools/base/tests/test_utils.py @@ -1,6 +1,7 @@ import importlib import sys from contextlib import contextmanager +from unittest.mock import MagicMock import pytest @@ -53,7 +54,6 @@ def test_util_buffered_stdout_stderr(): def test_util_buffered_no_stdout_stderr(): - with pytest.raises(utils.BufferUtilError): with utils.buffered(): pass @@ -119,31 +119,65 @@ def test_util_coverage_with_data_file(patches): == [(m_open.return_value.__enter__.return_value,), {}]) -def test_util_extract(patches): + +@pytest.mark.parametrize( + "tarballs", + [(), tuple("TARB{i}" for i in range(0, 3))]) +def test_util_extract(patches, tarballs): patched = patches( - "tempfile.TemporaryDirectory", + "nested", + "pathlib", "tarfile.open", prefix="tools.base.utils") - with patched as (m_tmp, m_open): - assert utils.extract("TARBALL", "PATH") == "PATH" + with patched as (m_nested, m_plib, m_open): + _extractions = [MagicMock(), MagicMock()] + m_nested.return_value.__enter__.return_value = _extractions + + if tarballs: + assert utils.extract("PATH", *tarballs) == m_plib.Path.return_value + else: + with pytest.raises(utils.ExtractError) as e: + 
utils.extract("PATH", *tarballs) + + if not tarballs: + assert ( + e.value.args[0] + == 'No tarballs specified for extraction to PATH') + assert not m_nested.called + assert not m_open.called + for _extract in _extractions: + assert not _extract.extractall.called + return assert ( - list(m_open.call_args) - == [('TARBALL',), {}]) + list(m_plib.Path.call_args) + == [("PATH", ), {}]) + + for _extract in _extractions: + assert ( + list(_extract.extractall.call_args) + == [(), dict(path="PATH")]) + assert ( - list(m_open.return_value.__enter__.return_value.extractall.call_args) - == [(), {'path': "PATH"}]) + list(m_open.call_args_list) + == [[(tarb, ), {}] for tarb in tarballs]) + assert ( + list(m_nested.call_args) + == [tuple(m_open.return_value for x in tarballs), {}]) -def test_util_untar(patches): +@pytest.mark.parametrize( + "tarballs", + [(), tuple("TARB{i}" for i in range(0, 3))]) +def test_util_untar(patches, tarballs): patched = patches( "tempfile.TemporaryDirectory", "extract", prefix="tools.base.utils") with patched as (m_tmp, m_extract): - with utils.untar("PATH") as tmpdir: + with utils.untar(*tarballs) as tmpdir: assert tmpdir == m_extract.return_value assert ( @@ -151,44 +185,44 @@ def test_util_untar(patches): == [(), {}]) assert ( list(m_extract.call_args) - == [('PATH', m_tmp.return_value.__enter__.return_value), {}]) + == [(m_tmp.return_value.__enter__.return_value, ) + tarballs, {}]) def test_util_from_yaml(patches): patched = patches( - "open", + "pathlib", "yaml", prefix="tools.base.utils") - with patched as (m_open, m_yaml): + with patched as (m_plib, m_yaml): assert utils.from_yaml("PATH") == m_yaml.safe_load.return_value assert ( - list(m_open.call_args) + list(m_plib.Path.call_args) == [("PATH", ), {}]) assert ( list(m_yaml.safe_load.call_args) - == [(m_open.return_value.__enter__.return_value.read.return_value, ), {}]) + == [(m_plib.Path.return_value.read_text.return_value, ), {}]) assert ( - 
list(m_open.return_value.__enter__.return_value.read.call_args) + list(m_plib.Path.return_value.read_text.call_args) == [(), {}]) def test_util_to_yaml(patches): patched = patches( - "open", + "pathlib", "yaml", prefix="tools.base.utils") - with patched as (m_open, m_yaml): - assert utils.to_yaml("DATA", "PATH") == "PATH" + with patched as (m_plib, m_yaml): + assert utils.to_yaml("DATA", "PATH") == m_plib.Path.return_value - assert ( - list(m_open.call_args) - == [("PATH", "w"), {}]) assert ( list(m_yaml.dump.call_args) == [("DATA", ), {}]) assert ( - list(m_open.return_value.__enter__.return_value.write.call_args) + list(m_plib.Path.return_value.write_text.call_args) == [(m_yaml.dump.return_value, ), {}]) + assert ( + list(m_plib.Path.call_args) + == [("PATH", ), {}]) diff --git a/tools/base/utils.py b/tools/base/utils.py index 379e8f4326333..ca92cdc4e78be 100644 --- a/tools/base/utils.py +++ b/tools/base/utils.py @@ -4,15 +4,21 @@ import io import os +import pathlib import tarfile import tempfile from configparser import ConfigParser from contextlib import ExitStack, contextmanager, redirect_stderr, redirect_stdout -from typing import Callable, Iterator, List, Optional, Union +from pathlib import Path +from typing import Callable, ContextManager, Iterator, List, Optional, Union import yaml +class ExtractError(Exception): + pass + + # this is testing specific - consider moving to tools.testing.utils @contextmanager def coverage_with_data_file(data_file: str) -> Iterator[str]: @@ -74,14 +80,19 @@ def buffered( stderr.extend(mangle(_stderr.read().strip().split("\n"))) -def extract(tarball: str, path: str) -> str: - with tarfile.open(tarball) as tarfiles: - tarfiles.extractall(path=path) - return path +def extract(path: Union[pathlib.Path, str], *tarballs: Union[pathlib.Path, str]) -> pathlib.Path: + if not tarballs: + raise ExtractError(f"No tarballs specified for extraction to {path}") + openers = nested(*tuple(tarfile.open(tarball) for tarball in tarballs)) + + 
with openers as tarfiles: + for tar in tarfiles: + tar.extractall(path=path) + return pathlib.Path(path) @contextmanager -def untar(tarball: str) -> Iterator[str]: +def untar(*tarballs: Union[pathlib.Path, str]) -> Iterator[pathlib.Path]: """Untar a tarball into a temporary directory for example to list the contents of a tarball: @@ -102,20 +113,30 @@ def untar(tarball: str) -> Iterator[str]: """ with tempfile.TemporaryDirectory() as tmpdir: - yield extract(tarball, tmpdir) + yield extract(tmpdir, *tarballs) -def from_yaml(path: str) -> Union[dict, list, str, int]: +def from_yaml(path: Union[pathlib.Path, str]) -> Union[dict, list, str, int]: """Returns the loaded python object from a yaml file given by `path`""" - with open(path) as f: - return yaml.safe_load(f.read()) + return yaml.safe_load(pathlib.Path(path).read_text()) -def to_yaml(data: Union[dict, list, str, int], path: str) -> str: +def to_yaml(data: Union[dict, list, str, int], path: Union[pathlib.Path, str]) -> pathlib.Path: """For given `data` dumps as yaml to provided `path`. 
Returns `path` """ - with open(path, "w") as f: - f.write(yaml.dump(data)) + path = pathlib.Path(path) + path.write_text(yaml.dump(data)) return path + + +@contextmanager +def cd_and_return(path: Union[pathlib.Path, str]) -> ContextManager[None]: + """Changes working directory to given path and returns to previous working directory on exit""" + prev_cwd = Path.cwd() + try: + os.chdir(path) + yield + finally: + os.chdir(prev_cwd) diff --git a/tools/code_format/BUILD b/tools/code_format/BUILD index 95784d0ef412f..bb9683c151330 100644 --- a/tools/code_format/BUILD +++ b/tools/code_format/BUILD @@ -15,6 +15,7 @@ exports_files([ envoy_py_binary( name = "tools.code_format.python_check", deps = [ + "//tools/base:aio", "//tools/base:checker", "//tools/base:utils", requirement("flake8"), diff --git a/tools/code_format/check_format.py b/tools/code_format/check_format.py index cddeafcc42a3a..d46f40af7dc2c 100755 --- a/tools/code_format/check_format.py +++ b/tools/code_format/check_format.py @@ -23,6 +23,7 @@ "./bazel-", "./.cache", "./source/extensions/extensions_build_config.bzl", + "./contrib/contrib_build_config.bzl", "./bazel/toolchains/configs/", "./tools/testdata/check_format/", "./tools/pyformat/", @@ -63,15 +64,13 @@ # perform temporary registrations. 
REGISTER_FACTORY_TEST_ALLOWLIST = ( "./test/common/config/registry_test.cc", "./test/integration/clusters/", - "./test/integration/filters/") + "./test/integration/filters/", "./test/integration/load_balancers/") # Files in these paths can use MessageLite::SerializeAsString SERIALIZE_AS_STRING_ALLOWLIST = ( - "./source/common/config/version_converter.cc", "./source/common/protobuf/utility.cc", "./source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc", "./test/common/protobuf/utility_test.cc", - "./test/common/config/version_converter_test.cc", "./test/common/grpc/codec_test.cc", "./test/common/grpc/codec_fuzz_test.cc", "./test/extensions/filters/common/expr/context_test.cc", @@ -97,8 +96,8 @@ "./source/common/common/regex.cc", "./source/common/stats/tag_extractor_impl.h", "./source/common/stats/tag_extractor_impl.cc", "./source/common/formatter/substitution_formatter.cc", - "./source/extensions/filters/http/squash/squash_filter.h", - "./source/extensions/filters/http/squash/squash_filter.cc", "./source/server/admin/utils.h", + "./contrib/squash/filters/http/source/squash_filter.h", + "./contrib/squash/filters/http/source/squash_filter.cc", "./source/server/admin/utils.h", "./source/server/admin/utils.cc", "./source/server/admin/stats_handler.h", "./source/server/admin/stats_handler.cc", "./source/server/admin/prometheus_stats.h", "./source/server/admin/prometheus_stats.cc", "./tools/clang_tools/api_booster/main.cc", @@ -176,6 +175,7 @@ HISTOGRAM_SI_SUFFIX_REGEX = re.compile(r"(?<=HISTOGRAM\()[a-zA-Z0-9_]+_(b|kb|mb|ns|us|ms|s)(?=,)") TEST_NAME_STARTING_LOWER_CASE_REGEX = re.compile(r"TEST(_.\(.*,\s|\()[a-z].*\)\s\{") EXTENSIONS_CODEOWNERS_REGEX = re.compile(r'.*(extensions[^@]*\s+)(@.*)') +CONTRIB_CODEOWNERS_REGEX = re.compile(r'(/contrib/[^@]*\s+)(@.*)') COMMENT_REGEX = re.compile(r"//|\*") DURATION_VALUE_REGEX = re.compile(r'\b[Dd]uration\(([0-9.]+)') PROTO_VALIDATION_STRING = re.compile(r'\bmin_bytes\b') @@ -1086,13 +1086,20 @@ def 
check_format_visitor(self, arg, dir_name, names): # Sanity check CODEOWNERS. This doesn't need to be done in a multi-threaded # manner as it is a small and limited list. source_prefix = './source/' - full_prefix = './source/extensions/' + core_extensions_full_prefix = './source/extensions/' # Check to see if this directory is a subdir under /source/extensions # Also ignore top level directories under /source/extensions since we don't # need owners for source/extensions/access_loggers etc, just the subdirectories. - if dir_name.startswith(full_prefix) and '/' in dir_name[len(full_prefix):]: + if dir_name.startswith( + core_extensions_full_prefix) and '/' in dir_name[len(core_extensions_full_prefix):]: self.check_owners(dir_name[len(source_prefix):], owned_directories, error_messages) + # For contrib extensions we track ownership at the top level only. + contrib_prefix = './contrib/' + if dir_name.startswith(contrib_prefix): + top_level = pathlib.PurePath('/', *pathlib.PurePath(dir_name).parts[:2], '/') + self.check_owners(str(top_level), owned_directories, error_messages) + for file_name in names: if dir_name.startswith("./api") and self.is_starlark_file(file_name): result = pool.apply_async( @@ -1214,6 +1221,30 @@ def owned_directories(error_messages): "Extensions require at least one maintainer OWNER:\n" " {}".format(line)) + m = CONTRIB_CODEOWNERS_REGEX.search(line) + if m is not None and not line.startswith('#'): + stripped_path = m.group(1).strip() + if not stripped_path.endswith('/'): + error_messages.append( + "Contrib CODEOWNERS entry '{}' must end in '/'".format( + stripped_path)) + continue + + if not (stripped_path.count('/') == 3 or + (stripped_path.count('/') == 4 + and stripped_path.startswith('/contrib/common/'))): + error_messages.append( + "Contrib CODEOWNERS entry '{}' must be 2 directories deep unless in /contrib/common/ and then it can be 3 directories deep" + .format(stripped_path)) + continue + + owned.append(stripped_path) + owners = 
re.findall('@\S+', m.group(2).strip()) + if len(owners) < 2: + error_messages.append( + "Contrib extensions require at least 2 owners in CODEOWNERS:\n" + " {}".format(line)) + return owned except IOError: return [] # for the check format tests. @@ -1222,9 +1253,15 @@ def owned_directories(error_messages): error_messages = [] owned_directories = owned_directories(error_messages) if os.path.isfile(args.target_path): - if not args.target_path.startswith(EXCLUDED_PREFIXES) and args.target_path.endswith( - SUFFIXES): - error_messages += format_checker.check_format("./" + args.target_path) + # All of our EXCLUDED_PREFIXES start with "./", but the provided + # target path argument might not. Add it here if it is missing, + # and use that normalized path for both lookup and `check_format`. + normalized_target_path = args.target_path + if not normalized_target_path.startswith("./"): + normalized_target_path = "./" + normalized_target_path + if not normalized_target_path.startswith( + EXCLUDED_PREFIXES) and normalized_target_path.endswith(SUFFIXES): + error_messages += format_checker.check_format(normalized_target_path) else: results = [] diff --git a/tools/code_format/envoy_build_fixer.py b/tools/code_format/envoy_build_fixer.py index cfdbf59d24a90..7de2a307889c0 100755 --- a/tools/code_format/envoy_build_fixer.py +++ b/tools/code_format/envoy_build_fixer.py @@ -34,6 +34,7 @@ # Match a load() statement for the envoy_package macros. PACKAGE_LOAD_BLOCK_REGEX = re.compile('("envoy_package".*?\)\n)', re.DOTALL) EXTENSION_PACKAGE_LOAD_BLOCK_REGEX = re.compile('("envoy_extension_package".*?\)\n)', re.DOTALL) +CONTRIB_PACKAGE_LOAD_BLOCK_REGEX = re.compile('("envoy_contrib_package".*?\)\n)', re.DOTALL) # Match Buildozer 'print' output. 
Example of Buildozer print output: # cc_library json_transcoder_filter_lib [json_transcoder_filter.cc] (missing) (missing) @@ -41,7 +42,7 @@ '\s*([\w_]+)\s+([\w_]+)\s+[(\[](.*?)[)\]]\s+[(\[](.*?)[)\]]\s+[(\[](.*?)[)\]]') # Match API header include in Envoy source file? -API_INCLUDE_REGEX = re.compile('#include "(envoy/.*)/[^/]+\.pb\.(validate\.)?h"') +API_INCLUDE_REGEX = re.compile('#include "(contrib/envoy/.*|envoy/.*)/[^/]+\.pb\.(validate\.)?h"') class EnvoyBuildFixerError(Exception): @@ -79,6 +80,10 @@ def fix_package_and_license(path, contents): regex_to_use = EXTENSION_PACKAGE_LOAD_BLOCK_REGEX package_string = 'envoy_extension_package' + if 'contrib/' in path: + regex_to_use = CONTRIB_PACKAGE_LOAD_BLOCK_REGEX + package_string = 'envoy_contrib_package' + # Ensure we have an envoy_package import load if this is a real Envoy package. We also allow # the prefix to be overridden if envoy is included in a larger workspace. if re.search(ENVOY_RULE_REGEX, contents): diff --git a/tools/code_format/python_check.py b/tools/code_format/python_check.py index 64f1e296f9560..e3a00f45b0cb5 100755 --- a/tools/code_format/python_check.py +++ b/tools/code_format/python_check.py @@ -13,15 +13,17 @@ # python requires: flake8, yapf # -import os +import argparse +import pathlib import sys from functools import cached_property +from typing import Iterable, List, Optional, Tuple -from flake8.main.application import Application as Flake8Application +from flake8.main.application import Application as Flake8Application # type:ignore -import yapf +import yapf # type:ignore -from tools.base import checker, utils +from tools.base import aio, checker, utils FLAKE8_CONFIG = '.flake8' YAPF_CONFIG = '.style.yapf' @@ -30,12 +32,12 @@ # - isort -class PythonChecker(checker.ForkingChecker): +class PythonChecker(checker.AsyncChecker): checks = ("flake8", "yapf") @property - def diff_file_path(self) -> str: - return self.args.diff_file + def diff_file_path(self) -> Optional[pathlib.Path]: + return 
pathlib.Path(self.args.diff_file) if self.args.diff_file else None @cached_property def flake8_app(self) -> Flake8Application: @@ -44,12 +46,12 @@ def flake8_app(self) -> Flake8Application: return flake8_app @property - def flake8_args(self) -> list: - return ["--config", self.flake8_config_path, self.path] + def flake8_args(self) -> Tuple[str, ...]: + return ("--config", str(self.flake8_config_path), str(self.path)) @property - def flake8_config_path(self) -> str: - return os.path.join(self.path, FLAKE8_CONFIG) + def flake8_config_path(self) -> pathlib.Path: + return self.path.joinpath(FLAKE8_CONFIG) @property def recurse(self) -> bool: @@ -57,17 +59,17 @@ def recurse(self) -> bool: return self.args.recurse @property - def yapf_config_path(self) -> str: - return os.path.join(self.path, YAPF_CONFIG) + def yapf_config_path(self) -> pathlib.Path: + return self.path.joinpath(YAPF_CONFIG) @property - def yapf_files(self): + def yapf_files(self) -> List[str]: return yapf.file_resources.GetCommandLineFiles( self.args.paths, recursive=self.recurse, - exclude=yapf.file_resources.GetExcludePatternsForDir(self.path)) + exclude=yapf.file_resources.GetExcludePatternsForDir(str(self.path))) - def add_arguments(self, parser) -> None: + def add_arguments(self, parser: argparse.ArgumentParser) -> None: super().add_arguments(parser) parser.add_argument( "--recurse", @@ -78,56 +80,58 @@ def add_arguments(self, parser) -> None: parser.add_argument( "--diff-file", default=None, help="Specify the path to a diff file with fixes") - def check_flake8(self) -> None: + async def check_flake8(self) -> None: """Run flake8 on files and/or repo""" - errors = [] + errors: List[str] = [] with utils.buffered(stdout=errors, mangle=self._strip_lines): self.flake8_app.run_checks() self.flake8_app.report() if errors: self.error("flake8", errors) - def check_yapf(self) -> None: + async def check_yapf(self) -> None: """Run flake8 on files and/or repo""" - for python_file in self.yapf_files: - 
self.yapf_run(python_file) + futures = aio.concurrent(self.yapf_format(python_file) for python_file in self.yapf_files) - def on_check_run(self, check: str) -> None: + async for (python_file, (reformatted, encoding, changed)) in futures: + self.yapf_result(python_file, reformatted, changed) + + async def on_check_run(self, check: str) -> None: if check not in self.failed and check not in self.warned: self.succeed(check, [check]) - def on_checks_complete(self) -> int: + async def on_checks_complete(self) -> int: if self.diff_file_path and self.has_failed: - result = self.subproc_run(["git", "diff", "HEAD"]) - with open(self.diff_file_path, "wb") as f: - f.write(result.stdout) - return super().on_checks_complete() - - def yapf_format(self, python_file: str) -> tuple: - return yapf.yapf_api.FormatFile( + result = await aio.async_subprocess.run(["git", "diff", "HEAD"], + cwd=self.path, + capture_output=True) + self.diff_file_path.write_bytes(result.stdout) + return await super().on_checks_complete() + + async def yapf_format(self, python_file: str) -> tuple: + return python_file, yapf.yapf_api.FormatFile( python_file, - style_config=self.yapf_config_path, + style_config=str(self.yapf_config_path), in_place=self.fix, print_diff=not self.fix) - def yapf_run(self, python_file: str) -> None: - reformatted_source, encoding, changed = self.yapf_format(python_file) + def yapf_result(self, python_file: str, reformatted: str, changed: bool) -> None: if not changed: return self.succeed("yapf", [python_file]) if self.fix: return self.warn("yapf", [f"{python_file}: reformatted"]) - if reformatted_source: - return self.warn("yapf", [f"{python_file}: diff\n{reformatted_source}"]) + if reformatted: + return self.warn("yapf", [f"{python_file}: diff\n{reformatted}"]) self.error("yapf", [python_file]) - def _strip_line(self, line) -> str: - return line[len(self.path) + 1:] if line.startswith(f"{self.path}/") else line + def _strip_line(self, line: str) -> str: + return 
line[len(str(self.path)) + 1:] if line.startswith(f"{self.path}/") else line - def _strip_lines(self, lines) -> list: + def _strip_lines(self, lines: Iterable[str]) -> List[str]: return [self._strip_line(line) for line in lines if line] -def main(*args: list) -> None: +def main(*args: str) -> Optional[int]: return PythonChecker(*args).run() diff --git a/tools/code_format/requirements.txt b/tools/code_format/requirements.txt index 1b6d092dbc63d..bb703224ca3ff 100644 --- a/tools/code_format/requirements.txt +++ b/tools/code_format/requirements.txt @@ -18,9 +18,9 @@ mccabe==0.6.1 \ --hash=sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42 \ --hash=sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f # via flake8 -pep8-naming==0.11.1 \ - --hash=sha256:a1dd47dd243adfe8a83616e27cf03164960b507530f155db94e10b36a6cd6724 \ - --hash=sha256:f43bfe3eea7e0d73e8b5d07d6407ab47f2476ccaeff6937c84275cd30b016738 +pep8-naming==0.12.1 \ + --hash=sha256:4a8daeaeb33cfcde779309fc0c9c0a68a3bbe2ad8a8308b763c5068f86eb9f37 \ + --hash=sha256:bb2455947757d162aa4cad55dba4ce029005cd1692f2899a21d51d8630ca7841 # via -r tools/code_format/requirements.txt pycodestyle==2.7.0 \ --hash=sha256:514f76d918fcc0b55c6680472f0a37970994e07bbb80725808c17089be302068 \ diff --git a/tools/code_format/tests/test_python_check.py b/tools/code_format/tests/test_python_check.py index ab22c8f9089e6..7cf39577d4bb7 100644 --- a/tools/code_format/tests/test_python_check.py +++ b/tools/code_format/tests/test_python_check.py @@ -1,5 +1,6 @@ +import types from contextlib import contextmanager -from unittest.mock import patch, MagicMock, PropertyMock +from unittest.mock import AsyncMock, patch, MagicMock, PropertyMock import pytest @@ -12,12 +13,24 @@ def test_python_checker_constructor(): assert checker.args.paths == ['path1', 'path2', 'path3'] -def test_python_diff_path(): +@pytest.mark.parametrize("diff_path", ["", None, "PATH"]) +def test_python_diff_path(patches, diff_path): checker = 
python_check.PythonChecker("path1", "path2", "path3") - args_mock = patch("tools.code_format.python_check.PythonChecker.args", new_callable=PropertyMock) + patched = patches( + "pathlib", + ("PythonChecker.args", dict(new_callable=PropertyMock)), + prefix="tools.code_format.python_check") - with args_mock as m_args: - assert checker.diff_file_path == m_args.return_value.diff_file + with patched as (m_plib, m_args): + m_args.return_value.diff_file = diff_path + assert checker.diff_file_path == (m_plib.Path.return_value if diff_path else None) + + if diff_path: + assert ( + list(m_plib.Path.call_args) + == [(m_args.return_value.diff_file, ), {}]) + else: + assert not m_plib.Path.called def test_python_flake8_app(patches): @@ -48,38 +61,37 @@ def test_python_flake8_args(patches): with patched as (m_flake8_config, m_path): assert ( checker.flake8_args - == ['--config', - m_flake8_config.return_value, m_path.return_value]) + == ('--config', + str(m_flake8_config.return_value), + str(m_path.return_value))) def test_python_flake8_config_path(patches): checker = python_check.PythonChecker("path1", "path2", "path3") patched = patches( ("PythonChecker.path", dict(new_callable=PropertyMock)), - "os.path.join", prefix="tools.code_format.python_check") - with patched as (m_path, m_join): - assert checker.flake8_config_path == m_join.return_value + with patched as (m_path, ): + assert checker.flake8_config_path == m_path.return_value.joinpath.return_value assert ( - list(m_join.call_args) - == [(m_path.return_value, python_check.FLAKE8_CONFIG), {}]) + list(m_path.return_value.joinpath.call_args) + == [(python_check.FLAKE8_CONFIG, ), {}]) def test_python_yapf_config_path(patches): checker = python_check.PythonChecker("path1", "path2", "path3") patched = patches( ("PythonChecker.path", dict(new_callable=PropertyMock)), - "os.path.join", prefix="tools.code_format.python_check") - with patched as (m_path, m_join): - assert checker.yapf_config_path == m_join.return_value + with 
patched as (m_path, ): + assert checker.yapf_config_path == m_path.return_value.joinpath.return_value assert ( - list(m_join.call_args) - == [(m_path.return_value, python_check.YAPF_CONFIG), {}]) + list(m_path.return_value.joinpath.call_args) + == [(python_check.YAPF_CONFIG, ), {}]) def test_python_yapf_files(patches): @@ -102,12 +114,12 @@ def test_python_yapf_files(patches): 'exclude': m_yapf_exclude.return_value}]) assert ( list(m_yapf_exclude.call_args) - == [(m_path.return_value,), {}]) + == [(str(m_path.return_value),), {}]) def test_python_add_arguments(patches): checker = python_check.PythonChecker("path1", "path2", "path3") - add_mock = patch("tools.code_format.python_check.checker.ForkingChecker.add_arguments") + add_mock = patch("tools.code_format.python_check.checker.AsyncChecker.add_arguments") m_parser = MagicMock() with add_mock as m_add: @@ -126,8 +138,9 @@ def test_python_add_arguments(patches): {'default': None, 'help': 'Specify the path to a diff file with fixes'}]]) +@pytest.mark.asyncio @pytest.mark.parametrize("errors", [[], ["err1", "err2"]]) -def test_python_check_flake8(patches, errors): +async def test_python_check_flake8(patches, errors): checker = python_check.PythonChecker("path1", "path2", "path3") patched = patches( @@ -144,7 +157,7 @@ def mock_buffered(stdout=None, mangle=None): with patched as (m_buffered, m_error, m_mangle, m_flake8_app): m_buffered.side_effect = mock_buffered - checker.check_flake8() + assert not await checker.check_flake8() assert ( list(m_buffered.call_args) @@ -175,37 +188,41 @@ def test_python_check_recurse(): assert "recurse" not in checker.__dict__ -def test_python_check_yapf(patches): +@pytest.mark.asyncio +async def test_python_check_yapf(patches): checker = python_check.PythonChecker("path1", "path2", "path3") patched = patches( - "PythonChecker.yapf_run", + "aio", + ("PythonChecker.yapf_format", dict(new_callable=MagicMock)), + "PythonChecker.yapf_result", ("PythonChecker.yapf_files", 
dict(new_callable=PropertyMock)), prefix="tools.code_format.python_check") + files = ["file1", "file2", "file3"] + + async def concurrent(iters): + assert isinstance(iters, types.GeneratorType) + for i, format_result in enumerate(iters): + yield (format_result, (f"REFORMAT{i}", f"ENCODING{i}", f"CHANGED{i}")) - with patched as (m_yapf_run, m_yapf_files): - m_yapf_files.return_value = ["file1", "file2", "file3"] - checker.check_yapf() + with patched as (m_aio, m_yapf_format, m_yapf_result, m_yapf_files): + m_yapf_files.return_value = files + m_aio.concurrent.side_effect = concurrent + assert not await checker.check_yapf() assert ( - list(list(c) for c in m_yapf_files.call_args_list) - == [[(), {}]]) + list(list(c) for c in m_yapf_format.call_args_list) + == [[(file,), {}] for file in files]) assert ( - list(list(c) for c in m_yapf_run.call_args_list) - == [[('file1',), {}], [('file2',), {}], [('file3',), {}]]) + list(list(c) for c in m_yapf_result.call_args_list) + == [[(m_yapf_format.return_value, f"REFORMAT{i}", f"CHANGED{i}"), {}] for i, _ in enumerate(files)]) -TEST_CHECK_RESULTS = ( - ("check1", [], []), - ("check1", ["check2", "check3"], ["check4", "check5"]), - ("check1", ["check1", "check3"], ["check4", "check5"]), - ("check1", ["check2", "check3"], ["check1", "check5"]), - ("check1", ["check1", "check3"], ["check1", "check5"])) - - -@pytest.mark.parametrize("results", TEST_CHECK_RESULTS) -def test_python_on_check_run(patches, results): +@pytest.mark.asyncio +@pytest.mark.parametrize("errors", [[], ["check2", "check3"], ["check1", "check3"]]) +@pytest.mark.parametrize("warnings", [[], ["check4", "check5"], ["check1", "check5"]]) +async def test_python_on_check_run(patches, errors, warnings): checker = python_check.PythonChecker("path1", "path2", "path3") - checkname, errors, warnings = results + checkname = "check1" patched = patches( "PythonChecker.succeed", ("PythonChecker.name", dict(new_callable=PropertyMock)), @@ -216,7 +233,7 @@ def 
test_python_on_check_run(patches, results): with patched as (m_succeed, m_name, m_failed, m_warned): m_failed.return_value = errors m_warned.return_value = warnings - checker.on_check_run(checkname) + assert not await checker.on_check_run(checkname) if checkname in warnings or checkname in errors: assert not m_succeed.called @@ -226,51 +243,45 @@ def test_python_on_check_run(patches, results): == [(checkname, [checkname]), {}]) -TEST_CHECKS_COMPLETE = ( - ("DIFF1", False), - ("DIFF1", True), - ("", False), - ("", True)) - - -@pytest.mark.parametrize("results", TEST_CHECKS_COMPLETE) -def test_python_on_checks_complete(patches, results): +@pytest.mark.asyncio +@pytest.mark.parametrize("diff_path", ["", "DIFF1"]) +@pytest.mark.parametrize("failed", [True, False]) +async def test_python_on_checks_complete(patches, diff_path, failed): checker = python_check.PythonChecker("path1", "path2", "path3") - diff_path, failed = results patched = patches( - "open", - "checker.ForkingChecker.subproc_run", - "checker.Checker.on_checks_complete", + "aio", + ("checker.AsyncChecker.on_checks_complete", dict(new_callable=AsyncMock)), ("PythonChecker.diff_file_path", dict(new_callable=PropertyMock)), ("PythonChecker.has_failed", dict(new_callable=PropertyMock)), + ("PythonChecker.path", dict(new_callable=PropertyMock)), prefix="tools.code_format.python_check") - with patched as (m_open, m_fork, m_super, m_diff, m_failed): - m_diff.return_value = diff_path + with patched as (m_aio, m_super, m_diff, m_failed, m_path): + m_aio.async_subprocess.run = AsyncMock() + if not diff_path: + m_diff.return_value = None m_failed.return_value = failed - assert checker.on_checks_complete() == m_super.return_value + assert await checker.on_checks_complete() == m_super.return_value if diff_path and failed: assert ( - list(m_fork.call_args) - == [(['git', 'diff', 'HEAD'],), {}]) - assert ( - list(m_open.call_args) - == [(diff_path, 'wb'), {}]) + list(m_aio.async_subprocess.run.call_args) + == [(['git', 
'diff', 'HEAD'],), + dict(capture_output=True, cwd=m_path.return_value)]) assert ( - list(m_open.return_value.__enter__.return_value.write.call_args) - == [(m_fork.return_value.stdout,), {}]) + list(m_diff.return_value.write_bytes.call_args) + == [(m_aio.async_subprocess.run.return_value.stdout,), {}]) else: - assert not m_fork.called - assert not m_open.called + assert not m_aio.async_subprocess.run.called assert ( list(m_super.call_args) == [(), {}]) +@pytest.mark.asyncio @pytest.mark.parametrize("fix", [True, False]) -def test_python_yapf_format(patches, fix): +async def test_python_yapf_format(patches, fix): checker = python_check.PythonChecker("path1", "path2", "path3") patched = patches( "yapf.yapf_api.FormatFile", @@ -280,12 +291,12 @@ def test_python_yapf_format(patches, fix): with patched as (m_format, m_config, m_fix): m_fix.return_value = fix - assert checker.yapf_format("FILENAME") == m_format.return_value + assert await checker.yapf_format("FILENAME") == ("FILENAME", m_format.return_value) assert ( list(m_format.call_args) == [('FILENAME',), - {'style_config': m_config.return_value, + {'style_config': str(m_config.return_value), 'in_place': fix, 'print_diff': not fix}]) assert ( @@ -293,30 +304,21 @@ def test_python_yapf_format(patches, fix): == [[(), {}], [(), {}]]) -TEST_FORMAT_RESULTS = ( - ("", "", True), - ("", "", False), - ("REFORMAT", "", True), - ("REFORMAT", "", False)) - - -@pytest.mark.parametrize("format_results", TEST_FORMAT_RESULTS) +@pytest.mark.parametrize("reformatted", ["", "REFORMAT"]) @pytest.mark.parametrize("fix", [True, False]) -def test_python_yapf_run(patches, fix, format_results): +@pytest.mark.parametrize("changed", [True, False]) +def test_python_yapf_result(patches, reformatted, fix, changed): checker = python_check.PythonChecker("path1", "path2", "path3") - reformat, encoding, changed = format_results patched = patches( - "PythonChecker.yapf_format", "PythonChecker.succeed", "PythonChecker.warn", "PythonChecker.error", 
("PythonChecker.fix", dict(new_callable=PropertyMock)), prefix="tools.code_format.python_check") - with patched as (m_format, m_succeed, m_warn, m_error, m_fix): + with patched as (m_succeed, m_warn, m_error, m_fix): m_fix.return_value = fix - m_format.return_value = format_results - checker.yapf_run("FILENAME") + checker.yapf_result("FILENAME", reformatted, changed) if not changed: assert ( @@ -334,12 +336,12 @@ def test_python_yapf_run(patches, fix, format_results): list(m_warn.call_args) == [('yapf', [f'FILENAME: reformatted']), {}]) return - if reformat: + if reformatted: assert not m_error.called assert len(m_warn.call_args_list) == 1 assert ( list(m_warn.call_args) - == [('yapf', [f'FILENAME: diff\n{reformat}']), {}]) + == [('yapf', [f'FILENAME: diff\n{reformatted}']), {}]) return assert not m_warn.called assert ( diff --git a/tools/dependency/cve_scan.py b/tools/dependency/cve_scan.py index 85c4e7a0264df..ddb4663cb10ef 100755 --- a/tools/dependency/cve_scan.py +++ b/tools/dependency/cve_scan.py @@ -61,6 +61,11 @@ # False positive on the match heuristic, fixed in Curl 7.76.0. 'CVE-2021-22876', 'CVE-2021-22890', + # Node.js issues unrelated to http-parser. + # See https://nvd.nist.gov/vuln/detail/CVE-2021-22918 + # See https://nvd.nist.gov/vuln/detail/CVE-2021-22921 + 'CVE-2021-22918', + 'CVE-2021-22921', ]) # Subset of CVE fields that are useful below. 
diff --git a/tools/dependency/pip_check.py b/tools/dependency/pip_check.py index c924c44936549..91a8456fc2854 100755 --- a/tools/dependency/pip_check.py +++ b/tools/dependency/pip_check.py @@ -11,9 +11,9 @@ # ./tools/dependency/pip_check.py -h # -import os import sys from functools import cached_property +from typing import Iterable, Set from tools.base import checker, utils @@ -25,6 +25,10 @@ # - pip-compile formatting +class PipConfigurationError(Exception): + pass + + class PipChecker(checker.Checker): checks = ("dependabot",) _dependabot_config = DEPENDABOT_CONFIG @@ -41,19 +45,22 @@ def config_requirements(self) -> set: @cached_property def dependabot_config(self) -> dict: """Parsed dependabot config""" - return utils.from_yaml(os.path.join(self.path, self.dependabot_config_path)) + result = utils.from_yaml(self.path.joinpath(self.dependabot_config_path)) + if not isinstance(result, dict): + raise PipConfigurationError( + f"Unable to parse dependabot config: {self.dependabot_config_path}") + return result @property def dependabot_config_path(self) -> str: return self._dependabot_config @cached_property - def requirements_dirs(self) -> set: + def requirements_dirs(self) -> Set[str]: """Set of found directories in the repo containing requirements.txt""" return set( - root[len(self.path):] - for root, dirs, files in os.walk(self.path) - if self.requirements_filename in files) + f"/{f.parent.relative_to(self.path)}" for f in self.path.glob("**/*") + if f.name == self.requirements_filename) @property def requirements_filename(self) -> str: @@ -75,12 +82,12 @@ def check_dependabot(self) -> None: missing_config, f"Missing dependabot config for {self.requirements_filename} in dir") - def dependabot_success(self, correct: list) -> None: + def dependabot_success(self, correct: Iterable) -> None: self.succeed( "dependabot", ([f"{self.requirements_filename}: {dirname}" for dirname in sorted(correct)])) - def dependabot_errors(self, missing: list, msg: str) -> None: + def 
dependabot_errors(self, missing: Iterable, msg: str) -> None: for dirname in sorted(missing): self.error("dependabot", [f"{msg}: {dirname}"]) diff --git a/tools/dependency/requirements.txt b/tools/dependency/requirements.txt index 978eb224c37f9..1d841a10db6dc 100644 --- a/tools/dependency/requirements.txt +++ b/tools/dependency/requirements.txt @@ -5,8 +5,8 @@ # pip-compile --generate-hashes tools/dependency/requirements.txt # certifi==2021.5.30 \ - --hash=sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8 \ - --hash=sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee + --hash=sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee \ + --hash=sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8 # via # -r tools/dependency/requirements.txt # requests @@ -54,9 +54,11 @@ cffi==1.14.5 \ chardet==4.0.0 \ --hash=sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa \ --hash=sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5 - # via - # -r tools/dependency/requirements.txt - # requests + # via -r tools/dependency/requirements.txt +charset-normalizer==2.0.4 \ + --hash=sha256:0c8911edd15d19223366a194a513099a302055a962bca2cec0f54b8b63175d8b \ + --hash=sha256:f23667ebe1084be45f6ae0538e4a5a865206544097e4e8bbcacf42cd02a348f3 + # via requests colorama==0.4.4 \ --hash=sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b \ --hash=sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2 @@ -74,8 +76,8 @@ idna==2.10 \ # -r tools/dependency/requirements.txt # requests packaging==21.0 \ - --hash=sha256:c86254f9220d55e31cc94d69bade760f0847da8000def4dfe1c6b872fd14ff14 \ - --hash=sha256:7dc96269f53a4ccec5c0670940a4281106dd0bb343f47b7471f779df49c2fbe7 + --hash=sha256:7dc96269f53a4ccec5c0670940a4281106dd0bb343f47b7471f779df49c2fbe7 \ + --hash=sha256:c86254f9220d55e31cc94d69bade760f0847da8000def4dfe1c6b872fd14ff14 # via -r 
tools/dependency/requirements.txt pycparser==2.20 \ --hash=sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0 \ @@ -121,16 +123,18 @@ pyparsing==2.4.7 \ # via # -r tools/dependency/requirements.txt # packaging -requests==2.25.1 \ - --hash=sha256:27973dd4a904a4f13b263a19c866c13b92a39ed1c964655f025f3f8d3d75b804 \ - --hash=sha256:c210084e36a42ae6b9219e00e48287def368a26d03a048ddad7bfee44f75871e +requests==2.26.0 \ + --hash=sha256:6c1246513ecd5ecd4528a0906f910e8f0f9c6b8ec72030dc9fd154dc1a6efd24 \ + --hash=sha256:b8aa58f8cf793ffd8782d3d8cb19e66ef36f7aba4353eec859e74678b01b07a7 # via # -r tools/dependency/requirements.txt # pygithub six==1.16.0 \ - --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 \ - --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 - # via pynacl + --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ + --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 + # via + # -r tools/dependency/requirements.txt + # pynacl urllib3==1.26.6 \ --hash=sha256:39fb8672126159acb139a7718dd10806104dec1e2f0f6c88aab05d17df10c8d4 \ --hash=sha256:f57b4c16c62fa2760b7e3d97c35b255512fb6b59a259730f36ba32ce9f8e342f diff --git a/tools/dependency/tests/test_pip_check.py b/tools/dependency/tests/test_pip_check.py index 1fa7171573015..0c3458626cc89 100644 --- a/tools/dependency/tests/test_pip_check.py +++ b/tools/dependency/tests/test_pip_check.py @@ -1,4 +1,4 @@ -from unittest.mock import patch, PropertyMock +from unittest.mock import MagicMock, patch, PropertyMock import pytest @@ -31,51 +31,67 @@ def test_pip_checker_config_requirements(): == [('updates',), {}]) -def test_pip_checker_dependabot_config(patches): +@pytest.mark.parametrize("isdict", [True, False]) +def test_pip_checker_dependabot_config(patches, isdict): checker = pip_check.PipChecker("path1", "path2", "path3") patched = patches( "utils", ("PipChecker.path", 
dict(new_callable=PropertyMock)), - "os.path.join", prefix="tools.dependency.pip_check") - with patched as (m_utils, m_path, m_join): - assert checker.dependabot_config == m_utils.from_yaml.return_value + with patched as (m_utils, m_path): + if isdict: + m_utils.from_yaml.return_value = {} + + if isdict: + assert checker.dependabot_config == m_utils.from_yaml.return_value + else: + with pytest.raises(pip_check.PipConfigurationError) as e: + checker.dependabot_config + + assert ( + e.value.args[0] + == f'Unable to parse dependabot config: {checker.dependabot_config_path}') assert ( - list(m_join.call_args) - == [(m_path.return_value, checker._dependabot_config), {}]) + list(m_path.return_value.joinpath.call_args) + == [(checker._dependabot_config, ), {}]) assert ( list(m_utils.from_yaml.call_args) - == [(m_join.return_value,), {}]) + == [(m_path.return_value.joinpath.return_value,), {}]) def test_pip_checker_requirements_dirs(patches): checker = pip_check.PipChecker("path1", "path2", "path3") - - dummy_walker = [ - ["ROOT1", ["DIR1", "DIR2"], ["FILE1", "FILE2", "FILE3"]], - ["ROOT2", ["DIR1", "DIR2"], ["FILE1", "FILE2", "REQUIREMENTS_FILE", "FILE3"]], - ["ROOT3", ["DIR1", "DIR2"], ["FILE1", "FILE2", "REQUIREMENTS_FILE", "FILE3"]], - ["ROOT4", ["DIR1", "DIR2"], ["FILE1", "FILE2", "FILE3"]]] - + dummy_glob = [ + "FILE1", "FILE2", "FILE3", + "REQUIREMENTS_FILE", "FILE4", + "REQUIREMENTS_FILE", "FILE5"] patched = patches( ("PipChecker.requirements_filename", dict(new_callable=PropertyMock)), ("PipChecker.path", dict(new_callable=PropertyMock)), - "os.walk", prefix="tools.dependency.pip_check") + expected = [] - with patched as (m_reqs, m_path, m_walk): + with patched as (m_reqs, m_path): m_reqs.return_value = "REQUIREMENTS_FILE" - m_path.return_value = "ROO" - m_walk.return_value = dummy_walker - assert checker.requirements_dirs == {'T3', 'T2'} + _glob = [] - assert m_reqs.called - assert m_path.called - assert ( - list(m_walk.call_args) - == [('ROO',), {}]) + for fname 
in dummy_glob: + _mock = MagicMock() + _mock.name = fname + if fname == "REQUIREMENTS_FILE": + expected.append(_mock) + _glob.append(_mock) + + m_path.return_value.glob.return_value = _glob + assert checker.requirements_dirs == {f"/{f.parent.relative_to.return_value}" for f in expected} + + for exp in expected: + assert ( + list(exp.parent.relative_to.call_args) + == [(m_path.return_value,), {}]) + assert "requirements_dirs" in checker.__dict__ TEST_REQS = ( diff --git a/tools/dependency/validate.py b/tools/dependency/validate.py index 032852e31a6e6..100c104cb26f3 100755 --- a/tools/dependency/validate.py +++ b/tools/dependency/validate.py @@ -122,7 +122,7 @@ def query_external_deps(self, *targets): Returns: A set of dependency identifiers that are reachable from targets. """ - deps_query = ' union '.join(f'deps({l})' for l in targets) + deps_query = 'deps(set({}))'.format(' '.join(targets)) try: deps = subprocess.check_output(['bazel', 'query', deps_query], stderr=subprocess.PIPE).decode().splitlines() @@ -259,8 +259,8 @@ def validate_control_plane_deps(self): if len(bad_controlplane_core_deps) > 0: raise DependencyError( f'Observed controlplane core deps {queried_controlplane_core_min_deps} is not covered ' - 'by "use_category" implied core deps {expected_controlplane_core_deps}: ' - '{bad_controlplane_core_deps} are missing') + f'by "use_category" implied core deps {expected_controlplane_core_deps}: ' + f'{bad_controlplane_core_deps} are missing') def validate_extension_deps(self, name, target): """Validate that extensions are correctly declared for dataplane_ext and observability_ext. 
diff --git a/tools/distribution/BUILD b/tools/distribution/BUILD index 6778780e56817..11142e4324f0e 100644 --- a/tools/distribution/BUILD +++ b/tools/distribution/BUILD @@ -1,15 +1,35 @@ load("//bazel:envoy_build_system.bzl", "envoy_package") -load("//tools/base:envoy_python.bzl", "envoy_py_binary") +load("//tools/base:envoy_python.bzl", "envoy_py_script") +load("@distribution_pip3//:requirements.bzl", "requirement") licenses(["notice"]) # Apache 2 envoy_package() -envoy_py_binary( +exports_files([ + "distrotest.sh", +]) + +envoy_py_script( + name = "tools.distribution.release", + entry_point = "envoy.distribution.release", + deps = [ + requirement("envoy.distribution.release"), + ], +) + +envoy_py_script( name = "tools.distribution.sign", + entry_point = "envoy.gpg.sign", + deps = [ + requirement("envoy.gpg.sign"), + ], +) + +envoy_py_script( + name = "tools.distribution.verify", + entry_point = "envoy.distribution.verify", deps = [ - "//tools/base:runner", - "//tools/base:utils", - "//tools/gpg:identity", + requirement("envoy.distribution.verify"), ], ) diff --git a/tools/distribution/distrotest.sh b/tools/distribution/distrotest.sh new file mode 100755 index 0000000000000..0b48de956a213 --- /dev/null +++ b/tools/distribution/distrotest.sh @@ -0,0 +1,115 @@ +#!/bin/bash -E + +FAILED=() +TESTNAME= + + +dump_envoy_response () { + echo "Envoy did not respond correctly" + echo "Response was" + echo "$RESPONSE" + echo + echo "Log:" + cat /tmp/envoy.log +} + +dump_permissions () { + echo "Actual permissions for: $1" + stat -L -c "%a %G %U" "$1" +} + +handle_fail () { + run_log "${TESTNAME}" "ERROR" + case "${TESTNAME}" in + "proxy-responds") + dump_envoy_response + ;; + "binary-permissions") + dump_permissions /usr/bin/envoy + ;; + "config-permissions") + dump_permissions /etc/envoy/envoy.yaml + ;; + esac + return 1 +} + + +trap_errors () { + if [[ -n "$TESTNAME" ]]; then + handle_fail + FAILED+=("$TESTNAME") + fi +} + +run_log () { + TESTNAME="$1" + shift + echo -e 
"[${DISTRO}/${PACKAGE}:${TESTNAME}] ${*}" +} + +trap trap_errors ERR +trap exit 1 INT + +run_log package-sig "Check package signature" +$VERIFY_COMMAND "${ENVOY_INSTALLABLE}" + +run_log package-maintainer "Check package maintainer" +$MAINTAINER_COMMAND | grep "$ENVOY_MAINTAINER" + +run_log install-envoy "Install Envoy" +$INSTALL_COMMAND "${ENVOY_INSTALLABLE}" && echo "Envoy installed" + +run_log group-exists "Check envoy group exists" +getent group envoy + +run_log user-exists "Check envoy user exists" +getent passwd envoy + +run_log shadow-no-password "Check envoy user has no password" +getent shadow envoy | grep -E '^envoy:!!:|^envoy:!:' + +run_log user-in-group "Check envoy user is in envoy group" +sudo -u envoy groups | grep envoy + +run_log user-home-dir "Check envoy user home directory" +getent passwd envoy | cut -d":" -f6 | grep "/nonexistent" + +run_log user-shell "Check envoy user shell" +getent passwd envoy | cut -d":" -f7 | grep "/bin/false" + +run_log binary-permissions "Check ownership/permissons of envoy binary" +test "$(stat -L -c "%a %G %U" /usr/bin/envoy)" == "$BINARY_PERMISSIONS" && echo "Correct permissions: ${BINARY_PERMISSIONS}" + +run_log config-permissions "Check ownership/permissons of envoy config" +test "$(stat -L -c "%a %G %U" /etc/envoy/envoy.yaml)" == "$CONFIG_PERMISSIONS" && echo "Correct permissions: ${CONFIG_PERMISSIONS}" + +run_log envoy-version "Envoy version" +envoy --version | grep "$ENVOY_VERSION" + +run_log start-envoy "Start Envoy" +# shellcheck disable=SC2024 +sudo -u envoy envoy -c /etc/envoy/envoy.yaml &> /tmp/envoy.log & echo "Envoy started" + +run_log wait-for-envoy "Wait for Envoy starting" +sleep 2 + +run_log envoy-running "Check envoy is running" +pgrep envoy + +run_log proxy-responds "Check proxy responds" +RESPONSE=$(curl -s http://localhost:10000/) +echo "$RESPONSE" | grep "Welcome to Envoy" + +run_log stop-envoy "Stop envoy" +sudo -u envoy pkill envoy && echo "Envoy stopped" + +run_log uninstall-envoy "Uninstall 
envoy" +$UNINSTALL_COMMAND "$PACKAGE" + +run_log reinstall-envoy "Reinstall envoy" +$INSTALL_COMMAND "${ENVOY_INSTALLABLE}" && echo "Envoy reinstalled" + +if [[ "${#FAILED[@]}" -ne "0" ]]; then + exit 1 +fi diff --git a/tools/distribution/requirements.txt b/tools/distribution/requirements.txt new file mode 100644 index 0000000000000..dcd1a7600ba2f --- /dev/null +++ b/tools/distribution/requirements.txt @@ -0,0 +1,382 @@ +# +# This file is autogenerated by pip-compile +# To update, run: +# +# pip-compile --generate-hashes tools/distribution/requirements.txt +# +abstracts==0.0.12 \ + --hash=sha256:acc01ff56c8a05fb88150dff62e295f9071fc33388c42f1dfc2787a8d1c755ff + # via + # aio.functional + # envoy.abstract.command + # envoy.github.abstract + # envoy.github.release +aio.functional==0.0.9 \ + --hash=sha256:824a997a394ad891bc9f403426babc13c9d0d1f4d1708c38e77d6aecae1cab1d + # via + # aio.tasks + # envoy.github.abstract + # envoy.github.release +aio.stream==0.0.2 \ + --hash=sha256:6f5baaff48f6319db134cd56c06ccf89db1f7c5f67a26382e081efc96f2f675d + # via envoy.github.release +aio.tasks==0.0.4 \ + --hash=sha256:9abd4b0881edb292c4f91a2f63b1dea7a9829a4bd4e8440225a1a412a90461fc + # via + # envoy.github.abstract + # envoy.github.release +aiodocker==0.21.0 \ + --hash=sha256:1f2e6db6377195962bb676d4822f6e3a0c525e1b5d60b8ebbab68230bff3d227 \ + --hash=sha256:6fe00135bb7dc40a407669d3157ecdfd856f3737d939df54f40a479d40cf7bdc + # via + # envoy.distribution.distrotest + # envoy.docker.utils +aiofiles==0.7.0 \ + --hash=sha256:a1c4fc9b2ff81568c83e21392a82f344ea9d23da906e4f6a52662764545e19d4 \ + --hash=sha256:c67a6823b5f23fcab0a2595a289cec7d8c863ffcb4322fb8cd6b90400aedfdbc + # via aio.stream +aiohttp==3.7.4.post0 \ + --hash=sha256:02f46fc0e3c5ac58b80d4d56eb0a7c7d97fcef69ace9326289fb9f1955e65cfe \ + --hash=sha256:0563c1b3826945eecd62186f3f5c7d31abb7391fedc893b7e2b26303b5a9f3fe \ + --hash=sha256:114b281e4d68302a324dd33abb04778e8557d88947875cbf4e842c2c01a030c5 \ + 
--hash=sha256:14762875b22d0055f05d12abc7f7d61d5fd4fe4642ce1a249abdf8c700bf1fd8 \ + --hash=sha256:15492a6368d985b76a2a5fdd2166cddfea5d24e69eefed4630cbaae5c81d89bd \ + --hash=sha256:17c073de315745a1510393a96e680d20af8e67e324f70b42accbd4cb3315c9fb \ + --hash=sha256:209b4a8ee987eccc91e2bd3ac36adee0e53a5970b8ac52c273f7f8fd4872c94c \ + --hash=sha256:230a8f7e24298dea47659251abc0fd8b3c4e38a664c59d4b89cca7f6c09c9e87 \ + --hash=sha256:2e19413bf84934d651344783c9f5e22dee452e251cfd220ebadbed2d9931dbf0 \ + --hash=sha256:393f389841e8f2dfc86f774ad22f00923fdee66d238af89b70ea314c4aefd290 \ + --hash=sha256:3cf75f7cdc2397ed4442594b935a11ed5569961333d49b7539ea741be2cc79d5 \ + --hash=sha256:3d78619672183be860b96ed96f533046ec97ca067fd46ac1f6a09cd9b7484287 \ + --hash=sha256:40eced07f07a9e60e825554a31f923e8d3997cfc7fb31dbc1328c70826e04cde \ + --hash=sha256:493d3299ebe5f5a7c66b9819eacdcfbbaaf1a8e84911ddffcdc48888497afecf \ + --hash=sha256:4b302b45040890cea949ad092479e01ba25911a15e648429c7c5aae9650c67a8 \ + --hash=sha256:515dfef7f869a0feb2afee66b957cc7bbe9ad0cdee45aec7fdc623f4ecd4fb16 \ + --hash=sha256:547da6cacac20666422d4882cfcd51298d45f7ccb60a04ec27424d2f36ba3eaf \ + --hash=sha256:5df68496d19f849921f05f14f31bd6ef53ad4b00245da3195048c69934521809 \ + --hash=sha256:64322071e046020e8797117b3658b9c2f80e3267daec409b350b6a7a05041213 \ + --hash=sha256:7615dab56bb07bff74bc865307aeb89a8bfd9941d2ef9d817b9436da3a0ea54f \ + --hash=sha256:79ebfc238612123a713a457d92afb4096e2148be17df6c50fb9bf7a81c2f8013 \ + --hash=sha256:7b18b97cf8ee5452fa5f4e3af95d01d84d86d32c5e2bfa260cf041749d66360b \ + --hash=sha256:932bb1ea39a54e9ea27fc9232163059a0b8855256f4052e776357ad9add6f1c9 \ + --hash=sha256:a00bb73540af068ca7390e636c01cbc4f644961896fa9363154ff43fd37af2f5 \ + --hash=sha256:a5ca29ee66f8343ed336816c553e82d6cade48a3ad702b9ffa6125d187e2dedb \ + --hash=sha256:af9aa9ef5ba1fd5b8c948bb11f44891968ab30356d65fd0cc6707d989cd521df \ + --hash=sha256:bb437315738aa441251214dad17428cafda9cdc9729499f1d6001748e1d432f4 \ + 
--hash=sha256:bdb230b4943891321e06fc7def63c7aace16095be7d9cf3b1e01be2f10fba439 \ + --hash=sha256:c6e9dcb4cb338d91a73f178d866d051efe7c62a7166653a91e7d9fb18274058f \ + --hash=sha256:cffe3ab27871bc3ea47df5d8f7013945712c46a3cc5a95b6bee15887f1675c22 \ + --hash=sha256:d012ad7911653a906425d8473a1465caa9f8dea7fcf07b6d870397b774ea7c0f \ + --hash=sha256:d9e13b33afd39ddeb377eff2c1c4f00544e191e1d1dee5b6c51ddee8ea6f0cf5 \ + --hash=sha256:e4b2b334e68b18ac9817d828ba44d8fcb391f6acb398bcc5062b14b2cbeac970 \ + --hash=sha256:e54962802d4b8b18b6207d4a927032826af39395a3bd9196a5af43fc4e60b009 \ + --hash=sha256:f705e12750171c0ab4ef2a3c76b9a4024a62c4103e3a55dd6f99265b9bc6fcfc \ + --hash=sha256:f881853d2643a29e643609da57b96d5f9c9b93f62429dcc1cbb413c7d07f0e1a \ + --hash=sha256:fe60131d21b31fd1a14bd43e6bb88256f69dfc3188b3a89d736d6c71ed43ec95 + # via + # aio.stream + # aiodocker + # envoy.github.abstract + # envoy.github.release +async-timeout==3.0.1 \ + --hash=sha256:0c3c816a028d47f659d6ff5c745cb2acf1f966da1fe5c19c77a70282b25f4c5f \ + --hash=sha256:4291ca197d287d274d0b6cb5d6f8f8f82d434ed288f962539ff18cc9012f9ea3 + # via aiohttp +attrs==21.2.0 \ + --hash=sha256:149e90d6d8ac20db7a955ad60cf0e6881a3f20d37096140088356da6c716b0b1 \ + --hash=sha256:ef6aaac3ca6cd92904cdd0d83f629a15f18053ec84e6432106f7a4d04ae4f5fb + # via aiohttp +cffi==1.14.6 \ + --hash=sha256:06c54a68935738d206570b20da5ef2b6b6d92b38ef3ec45c5422c0ebaf338d4d \ + --hash=sha256:0c0591bee64e438883b0c92a7bed78f6290d40bf02e54c5bf0978eaf36061771 \ + --hash=sha256:19ca0dbdeda3b2615421d54bef8985f72af6e0c47082a8d26122adac81a95872 \ + --hash=sha256:22b9c3c320171c108e903d61a3723b51e37aaa8c81255b5e7ce102775bd01e2c \ + --hash=sha256:26bb2549b72708c833f5abe62b756176022a7b9a7f689b571e74c8478ead51dc \ + --hash=sha256:33791e8a2dc2953f28b8d8d300dde42dd929ac28f974c4b4c6272cb2955cb762 \ + --hash=sha256:3c8d896becff2fa653dc4438b54a5a25a971d1f4110b32bd3068db3722c80202 \ + --hash=sha256:4373612d59c404baeb7cbd788a18b2b2a8331abcc84c3ba40051fcd18b17a4d5 \ + 
--hash=sha256:487d63e1454627c8e47dd230025780e91869cfba4c753a74fda196a1f6ad6548 \ + --hash=sha256:48916e459c54c4a70e52745639f1db524542140433599e13911b2f329834276a \ + --hash=sha256:4922cd707b25e623b902c86188aca466d3620892db76c0bdd7b99a3d5e61d35f \ + --hash=sha256:55af55e32ae468e9946f741a5d51f9896da6b9bf0bbdd326843fec05c730eb20 \ + --hash=sha256:57e555a9feb4a8460415f1aac331a2dc833b1115284f7ded7278b54afc5bd218 \ + --hash=sha256:5d4b68e216fc65e9fe4f524c177b54964af043dde734807586cf5435af84045c \ + --hash=sha256:64fda793737bc4037521d4899be780534b9aea552eb673b9833b01f945904c2e \ + --hash=sha256:6d6169cb3c6c2ad50db5b868db6491a790300ade1ed5d1da29289d73bbe40b56 \ + --hash=sha256:7bcac9a2b4fdbed2c16fa5681356d7121ecabf041f18d97ed5b8e0dd38a80224 \ + --hash=sha256:80b06212075346b5546b0417b9f2bf467fea3bfe7352f781ffc05a8ab24ba14a \ + --hash=sha256:818014c754cd3dba7229c0f5884396264d51ffb87ec86e927ef0be140bfdb0d2 \ + --hash=sha256:8eb687582ed7cd8c4bdbff3df6c0da443eb89c3c72e6e5dcdd9c81729712791a \ + --hash=sha256:99f27fefe34c37ba9875f224a8f36e31d744d8083e00f520f133cab79ad5e819 \ + --hash=sha256:9f3e33c28cd39d1b655ed1ba7247133b6f7fc16fa16887b120c0c670e35ce346 \ + --hash=sha256:a8661b2ce9694ca01c529bfa204dbb144b275a31685a075ce123f12331be790b \ + --hash=sha256:a9da7010cec5a12193d1af9872a00888f396aba3dc79186604a09ea3ee7c029e \ + --hash=sha256:aedb15f0a5a5949ecb129a82b72b19df97bbbca024081ed2ef88bd5c0a610534 \ + --hash=sha256:b315d709717a99f4b27b59b021e6207c64620790ca3e0bde636a6c7f14618abb \ + --hash=sha256:ba6f2b3f452e150945d58f4badd92310449876c4c954836cfb1803bdd7b422f0 \ + --hash=sha256:c33d18eb6e6bc36f09d793c0dc58b0211fccc6ae5149b808da4a62660678b156 \ + --hash=sha256:c9a875ce9d7fe32887784274dd533c57909b7b1dcadcc128a2ac21331a9765dd \ + --hash=sha256:c9e005e9bd57bc987764c32a1bee4364c44fdc11a3cc20a40b93b444984f2b87 \ + --hash=sha256:d2ad4d668a5c0645d281dcd17aff2be3212bc109b33814bbb15c4939f44181cc \ + --hash=sha256:d950695ae4381ecd856bcaf2b1e866720e4ab9a1498cba61c602e56630ca7195 \ + 
--hash=sha256:e22dcb48709fc51a7b58a927391b23ab37eb3737a98ac4338e2448bef8559b33 \ + --hash=sha256:e8c6a99be100371dbb046880e7a282152aa5d6127ae01783e37662ef73850d8f \ + --hash=sha256:e9dc245e3ac69c92ee4c167fbdd7428ec1956d4e754223124991ef29eb57a09d \ + --hash=sha256:eb687a11f0a7a1839719edd80f41e459cc5366857ecbed383ff376c4e3cc6afd \ + --hash=sha256:eb9e2a346c5238a30a746893f23a9535e700f8192a68c07c0258e7ece6ff3728 \ + --hash=sha256:ed38b924ce794e505647f7c331b22a693bee1538fdf46b0222c4717b42f744e7 \ + --hash=sha256:f0010c6f9d1a4011e429109fda55a225921e3206e7f62a0c22a35344bfd13cca \ + --hash=sha256:f0c5d1acbfca6ebdd6b1e3eded8d261affb6ddcf2186205518f1428b8569bb99 \ + --hash=sha256:f10afb1004f102c7868ebfe91c28f4a712227fe4cb24974350ace1f90e1febbf \ + --hash=sha256:f174135f5609428cc6e1b9090f9268f5c8935fddb1b25ccb8255a2d50de6789e \ + --hash=sha256:f3ebe6e73c319340830a9b2825d32eb6d8475c1dac020b4f0aa774ee3b898d1c \ + --hash=sha256:f627688813d0a4140153ff532537fbe4afea5a3dffce1f9deb7f91f848a832b5 \ + --hash=sha256:fd4305f86f53dfd8cd3522269ed7fc34856a8ee3709a5e28b2836b2db9d4cd69 + # via cryptography +chardet==4.0.0 \ + --hash=sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa \ + --hash=sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5 + # via aiohttp +coloredlogs==15.0.1 \ + --hash=sha256:612ee75c546f53e92e70049c9dbfcc18c935a2b9a53b66085ce9ef6a6e5c0934 \ + --hash=sha256:7c991aa71a4577af2f82600d8f8f3a89f936baeaf9b50a9c197da014e5bf16b0 + # via envoy.base.runner +cryptography==3.4.8 \ + --hash=sha256:0a7dcbcd3f1913f664aca35d47c1331fce738d44ec34b7be8b9d332151b0b01e \ + --hash=sha256:1eb7bb0df6f6f583dd8e054689def236255161ebbcf62b226454ab9ec663746b \ + --hash=sha256:21ca464b3a4b8d8e86ba0ee5045e103a1fcfac3b39319727bc0fc58c09c6aff7 \ + --hash=sha256:34dae04a0dce5730d8eb7894eab617d8a70d0c97da76b905de9efb7128ad7085 \ + --hash=sha256:3520667fda779eb788ea00080124875be18f2d8f0848ec00733c0ec3bb8219fc \ + 
--hash=sha256:3fa3a7ccf96e826affdf1a0a9432be74dc73423125c8f96a909e3835a5ef194a \ + --hash=sha256:5b0fbfae7ff7febdb74b574055c7466da334a5371f253732d7e2e7525d570498 \ + --hash=sha256:8695456444f277af73a4877db9fc979849cd3ee74c198d04fc0776ebc3db52b9 \ + --hash=sha256:94cc5ed4ceaefcbe5bf38c8fba6a21fc1d365bb8fb826ea1688e3370b2e24a1c \ + --hash=sha256:94fff993ee9bc1b2440d3b7243d488c6a3d9724cc2b09cdb297f6a886d040ef7 \ + --hash=sha256:9965c46c674ba8cc572bc09a03f4c649292ee73e1b683adb1ce81e82e9a6a0fb \ + --hash=sha256:a00cf305f07b26c351d8d4e1af84ad7501eca8a342dedf24a7acb0e7b7406e14 \ + --hash=sha256:a305600e7a6b7b855cd798e00278161b681ad6e9b7eca94c721d5f588ab212af \ + --hash=sha256:cd65b60cfe004790c795cc35f272e41a3df4631e2fb6b35aa7ac6ef2859d554e \ + --hash=sha256:d2a6e5ef66503da51d2110edf6c403dc6b494cc0082f85db12f54e9c5d4c3ec5 \ + --hash=sha256:d9ec0e67a14f9d1d48dd87a2531009a9b251c02ea42851c060b25c782516ff06 \ + --hash=sha256:f44d141b8c4ea5eb4dbc9b3ad992d45580c1d22bf5e24363f2fbf50c2d7ae8a7 + # via pyjwt +envoy.abstract.command==0.0.3 \ + --hash=sha256:4b7b15c91bea1f2eb7c2e8e35f95cd9437e1c8f151adc093bf7858fc85d48221 + # via + # envoy.base.runner + # envoy.distribution.release +envoy.base.checker==0.0.2 \ + --hash=sha256:2ac81efa20fd01fff644ff7dc7fadeac1c3e4dbb6210881ac7a7919ec0e048d8 + # via + # envoy.distribution.distrotest + # envoy.distribution.verify +envoy.base.runner==0.0.4 \ + --hash=sha256:4eeb2b661f1f0c402df4425852be554a8a83ef5d338bfae69ddcb9b90755379e + # via + # envoy.base.checker + # envoy.distribution.release + # envoy.github.abstract + # envoy.gpg.sign +envoy.base.utils==0.0.6 \ + --hash=sha256:58ed057137ebe80d78db90997efc59822115ee616e435a9afc3d7a19069bb53c + # via + # envoy.distribution.distrotest + # envoy.github.release + # envoy.gpg.sign +envoy.distribution.distrotest==0.0.3 \ + --hash=sha256:c094adbd959eb1336f93afc00aedb7ee4e68e8252e2365be816a6f9ede8a3de7 + # via envoy.distribution.verify +envoy.distribution.release==0.0.4 \ + 
--hash=sha256:41037e0488f0593ce5173739fe0cd1b45a4775f5a47738b85d9d04024ca241a2 + # via -r tools/distribution/requirements.txt +envoy.distribution.verify==0.0.2 \ + --hash=sha256:ae59134085de50203edf51c243dbf3301cbe5550db29f0ec6f9ea1c3b82fee1c + # via -r tools/distribution/requirements.txt +envoy.docker.utils==0.0.2 \ + --hash=sha256:a12cb57f0b6e204d646cbf94f927b3a8f5a27ed15f60d0576176584ec16a4b76 + # via envoy.distribution.distrotest +envoy.github.abstract==0.0.16 \ + --hash=sha256:badf04104492fb6b37ba2163f2b225132ed04aba680beb218e7c7d918564f8ee + # via + # envoy.distribution.release + # envoy.github.release +envoy.github.release==0.0.8 \ + --hash=sha256:fbc4354030137eb565b8c4d679965e4ef60b01de0c09310441836e592ca0cd19 + # via envoy.distribution.release +envoy.gpg.identity==0.0.2 \ + --hash=sha256:7d32ff9133e00b9974b4dabd2512b4872b091b8c5069d0112240dcc1a56bc406 + # via envoy.gpg.sign +envoy.gpg.sign==0.0.3 \ + --hash=sha256:31667931f5d7ff05fd809b89748f277511486311c777652af4cb8889bd641049 + # via -r tools/distribution/requirements.txt +frozendict==2.0.6 \ + --hash=sha256:3f00de72805cf4c9e81b334f3f04809278b967d2fed84552313a0fcce511beb1 \ + --hash=sha256:5d3f75832c35d4df041f0e19c268964cbef29c1eb34cd3517cf883f1c2d089b9 + # via envoy.base.runner +gidgethub==5.0.1 \ + --hash=sha256:3efbd6998600254ec7a2869318bd3ffde38edc3a0d37be0c14bc46b45947b682 \ + --hash=sha256:67245e93eb0918b37df038148af675df43b62e832c529d7f859f6b90d9f3e70d + # via + # envoy.github.abstract + # envoy.github.release +humanfriendly==9.2 \ + --hash=sha256:332da98c24cc150efcc91b5508b19115209272bfdf4b0764a56795932f854271 \ + --hash=sha256:f7dba53ac7935fd0b4a2fc9a29e316ddd9ea135fb3052d3d0279d10c18ff9c48 + # via coloredlogs +idna==3.2 \ + --hash=sha256:14475042e284991034cb48e06f6851428fb14c4dc953acd9be9a5e95c7b6dd7a \ + --hash=sha256:467fbad99067910785144ce333826c71fb0e63a425657295239737f7ecd125f3 + # via yarl +multidict==5.1.0 \ + --hash=sha256:018132dbd8688c7a69ad89c4a3f39ea2f9f33302ebe567a879da8f4ca73f0d0a 
\ + --hash=sha256:051012ccee979b2b06be928a6150d237aec75dd6bf2d1eeeb190baf2b05abc93 \ + --hash=sha256:05c20b68e512166fddba59a918773ba002fdd77800cad9f55b59790030bab632 \ + --hash=sha256:07b42215124aedecc6083f1ce6b7e5ec5b50047afa701f3442054373a6deb656 \ + --hash=sha256:0e3c84e6c67eba89c2dbcee08504ba8644ab4284863452450520dad8f1e89b79 \ + --hash=sha256:0e929169f9c090dae0646a011c8b058e5e5fb391466016b39d21745b48817fd7 \ + --hash=sha256:1ab820665e67373de5802acae069a6a05567ae234ddb129f31d290fc3d1aa56d \ + --hash=sha256:25b4e5f22d3a37ddf3effc0710ba692cfc792c2b9edfb9c05aefe823256e84d5 \ + --hash=sha256:2e68965192c4ea61fff1b81c14ff712fc7dc15d2bd120602e4a3494ea6584224 \ + --hash=sha256:2f1a132f1c88724674271d636e6b7351477c27722f2ed789f719f9e3545a3d26 \ + --hash=sha256:37e5438e1c78931df5d3c0c78ae049092877e5e9c02dd1ff5abb9cf27a5914ea \ + --hash=sha256:3a041b76d13706b7fff23b9fc83117c7b8fe8d5fe9e6be45eee72b9baa75f348 \ + --hash=sha256:3a4f32116f8f72ecf2a29dabfb27b23ab7cdc0ba807e8459e59a93a9be9506f6 \ + --hash=sha256:46c73e09ad374a6d876c599f2328161bcd95e280f84d2060cf57991dec5cfe76 \ + --hash=sha256:46dd362c2f045095c920162e9307de5ffd0a1bfbba0a6e990b344366f55a30c1 \ + --hash=sha256:4b186eb7d6ae7c06eb4392411189469e6a820da81447f46c0072a41c748ab73f \ + --hash=sha256:54fd1e83a184e19c598d5e70ba508196fd0bbdd676ce159feb412a4a6664f952 \ + --hash=sha256:585fd452dd7782130d112f7ddf3473ffdd521414674c33876187e101b588738a \ + --hash=sha256:5cf3443199b83ed9e955f511b5b241fd3ae004e3cb81c58ec10f4fe47c7dce37 \ + --hash=sha256:6a4d5ce640e37b0efcc8441caeea8f43a06addace2335bd11151bc02d2ee31f9 \ + --hash=sha256:7df80d07818b385f3129180369079bd6934cf70469f99daaebfac89dca288359 \ + --hash=sha256:806068d4f86cb06af37cd65821554f98240a19ce646d3cd24e1c33587f313eb8 \ + --hash=sha256:830f57206cc96ed0ccf68304141fec9481a096c4d2e2831f311bde1c404401da \ + --hash=sha256:929006d3c2d923788ba153ad0de8ed2e5ed39fdbe8e7be21e2f22ed06c6783d3 \ + --hash=sha256:9436dc58c123f07b230383083855593550c4d301d2532045a17ccf6eca505f6d \ + 
--hash=sha256:9dd6e9b1a913d096ac95d0399bd737e00f2af1e1594a787e00f7975778c8b2bf \ + --hash=sha256:ace010325c787c378afd7f7c1ac66b26313b3344628652eacd149bdd23c68841 \ + --hash=sha256:b47a43177a5e65b771b80db71e7be76c0ba23cc8aa73eeeb089ed5219cdbe27d \ + --hash=sha256:b797515be8743b771aa868f83563f789bbd4b236659ba52243b735d80b29ed93 \ + --hash=sha256:b7993704f1a4b204e71debe6095150d43b2ee6150fa4f44d6d966ec356a8d61f \ + --hash=sha256:d5c65bdf4484872c4af3150aeebe101ba560dcfb34488d9a8ff8dbcd21079647 \ + --hash=sha256:d81eddcb12d608cc08081fa88d046c78afb1bf8107e6feab5d43503fea74a635 \ + --hash=sha256:dc862056f76443a0db4509116c5cd480fe1b6a2d45512a653f9a855cc0517456 \ + --hash=sha256:ecc771ab628ea281517e24fd2c52e8f31c41e66652d07599ad8818abaad38cda \ + --hash=sha256:f200755768dc19c6f4e2b672421e0ebb3dd54c38d5a4f262b872d8cfcc9e93b5 \ + --hash=sha256:f21756997ad8ef815d8ef3d34edd98804ab5ea337feedcd62fb52d22bf531281 \ + --hash=sha256:fc13a9524bc18b6fb6e0dbec3533ba0496bbed167c56d0aabefd965584557d80 + # via + # aiohttp + # yarl +packaging==21.0 \ + --hash=sha256:7dc96269f53a4ccec5c0670940a4281106dd0bb343f47b7471f779df49c2fbe7 \ + --hash=sha256:c86254f9220d55e31cc94d69bade760f0847da8000def4dfe1c6b872fd14ff14 + # via envoy.github.release +pycparser==2.20 \ + --hash=sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0 \ + --hash=sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705 + # via cffi +pyjwt[crypto]==2.1.0 \ + --hash=sha256:934d73fbba91b0483d3857d1aff50e96b2a892384ee2c17417ed3203f173fca1 \ + --hash=sha256:fba44e7898bbca160a2b2b501f492824fc8382485d3a6f11ba5d0c1937ce6130 + # via gidgethub +pyparsing==2.4.7 \ + --hash=sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1 \ + --hash=sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b + # via packaging +python-gnupg==0.4.7 \ + --hash=sha256:2061f56b1942c29b92727bf9aecbd3cea3893acc9cccbdc7eb4604285efe4ac7 \ + 
--hash=sha256:3ff5b1bf5e397de6e1fe41a7c0f403dad4e242ac92b345f440eaecfb72a7ebae + # via envoy.gpg.identity +pyyaml==5.4.1 \ + --hash=sha256:08682f6b72c722394747bddaf0aa62277e02557c0fd1c42cb853016a38f8dedf \ + --hash=sha256:0f5f5786c0e09baddcd8b4b45f20a7b5d61a7e7e99846e3c799b05c7c53fa696 \ + --hash=sha256:129def1b7c1bf22faffd67b8f3724645203b79d8f4cc81f674654d9902cb4393 \ + --hash=sha256:294db365efa064d00b8d1ef65d8ea2c3426ac366c0c4368d930bf1c5fb497f77 \ + --hash=sha256:3b2b1824fe7112845700f815ff6a489360226a5609b96ec2190a45e62a9fc922 \ + --hash=sha256:3bd0e463264cf257d1ffd2e40223b197271046d09dadf73a0fe82b9c1fc385a5 \ + --hash=sha256:4465124ef1b18d9ace298060f4eccc64b0850899ac4ac53294547536533800c8 \ + --hash=sha256:49d4cdd9065b9b6e206d0595fee27a96b5dd22618e7520c33204a4a3239d5b10 \ + --hash=sha256:4e0583d24c881e14342eaf4ec5fbc97f934b999a6828693a99157fde912540cc \ + --hash=sha256:5accb17103e43963b80e6f837831f38d314a0495500067cb25afab2e8d7a4018 \ + --hash=sha256:607774cbba28732bfa802b54baa7484215f530991055bb562efbed5b2f20a45e \ + --hash=sha256:6c78645d400265a062508ae399b60b8c167bf003db364ecb26dcab2bda048253 \ + --hash=sha256:72a01f726a9c7851ca9bfad6fd09ca4e090a023c00945ea05ba1638c09dc3347 \ + --hash=sha256:74c1485f7707cf707a7aef42ef6322b8f97921bd89be2ab6317fd782c2d53183 \ + --hash=sha256:895f61ef02e8fed38159bb70f7e100e00f471eae2bc838cd0f4ebb21e28f8541 \ + --hash=sha256:8c1be557ee92a20f184922c7b6424e8ab6691788e6d86137c5d93c1a6ec1b8fb \ + --hash=sha256:bb4191dfc9306777bc594117aee052446b3fa88737cd13b7188d0e7aa8162185 \ + --hash=sha256:bfb51918d4ff3d77c1c856a9699f8492c612cde32fd3bcd344af9be34999bfdc \ + --hash=sha256:c20cfa2d49991c8b4147af39859b167664f2ad4561704ee74c1de03318e898db \ + --hash=sha256:cb333c16912324fd5f769fff6bc5de372e9e7a202247b48870bc251ed40239aa \ + --hash=sha256:d2d9808ea7b4af864f35ea216be506ecec180628aced0704e34aca0b040ffe46 \ + --hash=sha256:d483ad4e639292c90170eb6f7783ad19490e7a8defb3e46f97dfe4bacae89122 \ + 
--hash=sha256:dd5de0646207f053eb0d6c74ae45ba98c3395a571a2891858e87df7c9b9bd51b \ + --hash=sha256:e1d4970ea66be07ae37a3c2e48b5ec63f7ba6804bdddfdbd3cfd954d25a82e63 \ + --hash=sha256:e4fac90784481d221a8e4b1162afa7c47ed953be40d31ab4629ae917510051df \ + --hash=sha256:fa5ae20527d8e831e8230cbffd9f8fe952815b2b7dae6ffec25318803a7528fc \ + --hash=sha256:fd7f6999a8070df521b6384004ef42833b9bd62cfee11a09bda1079b4b704247 \ + --hash=sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6 \ + --hash=sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0 + # via envoy.base.utils +trycast==0.3.0 \ + --hash=sha256:1b7b4c0d4b0d674770a53f34a762e52a6cd6879eb251ab21625602699920080d \ + --hash=sha256:687185b812e8d1c45f2ba841e8de7bdcdee0695dcf3464f206800505d4c65f26 + # via envoy.base.utils +typing-extensions==3.10.0.2 \ + --hash=sha256:49f75d16ff11f1cd258e1b988ccff82a3ca5570217d7ad8c5f48205dd99a677e \ + --hash=sha256:d8226d10bc02a29bcc81df19a26e56a9647f8b0a6d4a83924139f4a8b01f17b7 \ + --hash=sha256:f1d25edafde516b146ecd0613dabcc61409817af4766fbbcfb8d1ad4ec441a34 + # via + # aiodocker + # aiohttp +uritemplate==3.0.1 \ + --hash=sha256:07620c3f3f8eed1f12600845892b0e036a2420acf513c53f7de0abd911a5894f \ + --hash=sha256:5af8ad10cec94f215e3f48112de2022e1d5a37ed427fbd88652fa908f2ab7cae + # via gidgethub +verboselogs==1.7 \ + --hash=sha256:d63f23bf568295b95d3530c6864a0b580cec70e7ff974177dead1e4ffbc6ff49 \ + --hash=sha256:e33ddedcdfdafcb3a174701150430b11b46ceb64c2a9a26198c76a156568e427 + # via + # envoy.base.runner + # envoy.github.abstract + # envoy.github.release +yarl==1.6.3 \ + --hash=sha256:00d7ad91b6583602eb9c1d085a2cf281ada267e9a197e8b7cae487dadbfa293e \ + --hash=sha256:0355a701b3998dcd832d0dc47cc5dedf3874f966ac7f870e0f3a6788d802d434 \ + --hash=sha256:15263c3b0b47968c1d90daa89f21fcc889bb4b1aac5555580d74565de6836366 \ + --hash=sha256:2ce4c621d21326a4a5500c25031e102af589edb50c09b321049e388b3934eec3 \ + 
--hash=sha256:31ede6e8c4329fb81c86706ba8f6bf661a924b53ba191b27aa5fcee5714d18ec \ + --hash=sha256:324ba3d3c6fee56e2e0b0d09bf5c73824b9f08234339d2b788af65e60040c959 \ + --hash=sha256:329412812ecfc94a57cd37c9d547579510a9e83c516bc069470db5f75684629e \ + --hash=sha256:4736eaee5626db8d9cda9eb5282028cc834e2aeb194e0d8b50217d707e98bb5c \ + --hash=sha256:4953fb0b4fdb7e08b2f3b3be80a00d28c5c8a2056bb066169de00e6501b986b6 \ + --hash=sha256:4c5bcfc3ed226bf6419f7a33982fb4b8ec2e45785a0561eb99274ebbf09fdd6a \ + --hash=sha256:547f7665ad50fa8563150ed079f8e805e63dd85def6674c97efd78eed6c224a6 \ + --hash=sha256:5b883e458058f8d6099e4420f0cc2567989032b5f34b271c0827de9f1079a424 \ + --hash=sha256:63f90b20ca654b3ecc7a8d62c03ffa46999595f0167d6450fa8383bab252987e \ + --hash=sha256:68dc568889b1c13f1e4745c96b931cc94fdd0defe92a72c2b8ce01091b22e35f \ + --hash=sha256:69ee97c71fee1f63d04c945f56d5d726483c4762845400a6795a3b75d56b6c50 \ + --hash=sha256:6d6283d8e0631b617edf0fd726353cb76630b83a089a40933043894e7f6721e2 \ + --hash=sha256:72a660bdd24497e3e84f5519e57a9ee9220b6f3ac4d45056961bf22838ce20cc \ + --hash=sha256:73494d5b71099ae8cb8754f1df131c11d433b387efab7b51849e7e1e851f07a4 \ + --hash=sha256:7356644cbed76119d0b6bd32ffba704d30d747e0c217109d7979a7bc36c4d970 \ + --hash=sha256:8a9066529240171b68893d60dca86a763eae2139dd42f42106b03cf4b426bf10 \ + --hash=sha256:8aa3decd5e0e852dc68335abf5478a518b41bf2ab2f330fe44916399efedfae0 \ + --hash=sha256:97b5bdc450d63c3ba30a127d018b866ea94e65655efaf889ebeabc20f7d12406 \ + --hash=sha256:9ede61b0854e267fd565e7527e2f2eb3ef8858b301319be0604177690e1a3896 \ + --hash=sha256:b2e9a456c121e26d13c29251f8267541bd75e6a1ccf9e859179701c36a078643 \ + --hash=sha256:b5dfc9a40c198334f4f3f55880ecf910adebdcb2a0b9a9c23c9345faa9185721 \ + --hash=sha256:bafb450deef6861815ed579c7a6113a879a6ef58aed4c3a4be54400ae8871478 \ + --hash=sha256:c49ff66d479d38ab863c50f7bb27dee97c6627c5fe60697de15529da9c3de724 \ + --hash=sha256:ce3beb46a72d9f2190f9e1027886bfc513702d748047b548b05dab7dfb584d2e \ + 
--hash=sha256:d26608cf178efb8faa5ff0f2d2e77c208f471c5a3709e577a7b3fd0445703ac8 \ + --hash=sha256:d597767fcd2c3dc49d6eea360c458b65643d1e4dbed91361cf5e36e53c1f8c96 \ + --hash=sha256:d5c32c82990e4ac4d8150fd7652b972216b204de4e83a122546dce571c1bdf25 \ + --hash=sha256:d8d07d102f17b68966e2de0e07bfd6e139c7c02ef06d3a0f8d2f0f055e13bb76 \ + --hash=sha256:e46fba844f4895b36f4c398c5af062a9808d1f26b2999c58909517384d5deda2 \ + --hash=sha256:e6b5460dc5ad42ad2b36cca524491dfcaffbfd9c8df50508bddc354e787b8dc2 \ + --hash=sha256:f040bcc6725c821a4c0665f3aa96a4d0805a7aaf2caf266d256b8ed71b9f041c \ + --hash=sha256:f0b059678fd549c66b89bed03efcabb009075bd131c248ecdf087bdb6faba24a \ + --hash=sha256:fcbb48a93e8699eae920f8d92f7160c03567b421bc17362a9ffbbd706a816f71 + # via aiohttp diff --git a/tools/distribution/sign.py b/tools/distribution/sign.py deleted file mode 100644 index e9830f0871255..0000000000000 --- a/tools/distribution/sign.py +++ /dev/null @@ -1,419 +0,0 @@ -#!/usr/bin/env python3 - -# You will need to have the respective system tools required for -# package signing to use this tool. -# -# For example you will need debsign to sign debs, and rpmsign to -# sign rpms. -# -# usage -# -# with bazel: -# -# bazel run //tools/distribution:sign -- -h -# -# alternatively, if you have the necessary python deps available -# -# PYTHONPATH=. 
./tools/distribution/sign.py -h -# -# python requires: coloredlogs, frozendict, python-gnupg, verboselogs -# - -import argparse -import os -import shutil -import subprocess -import sys -import tarfile -from functools import cached_property -from itertools import chain -from typing import Iterator, Optional, Type - -import verboselogs - -from tools.base import runner, utils -from tools.gpg import identity - -# Replacable `__` maintainer/gpg config - python interpolation doesnt work easily -# with this string -RPMMACRO_TEMPLATE = """ -%_signature gpg -%_gpg_path __GPG_CONFIG__ -%_gpg_name __MAINTAINER__ -%_gpgbin __GPG_BIN__ -%__gpg_sign_cmd %{__gpg} gpg --force-v3-sigs --batch --verbose --no-armor --no-secmem-warning -u "%{_gpg_name}" -sbo %{__signature_filename} --digest-algo sha256 %{__plaintext_filename}' -""" - - -class SigningError(Exception): - pass - - -# Base directory signing util - - -class DirectorySigningUtil(object): - """Base class for signing utils - eg for deb or rpm packages""" - - command_name = None - _package_type = None - ext = None - - def __init__( - self, - path: str, - maintainer: identity.GPGIdentity, - log: verboselogs.VerboseLogger, - command: Optional[str] = ""): - self.path = path - self.maintainer = maintainer - self.log = log - self._command = command - - @cached_property - def command(self) -> str: - """Provided command name/path or path to available system version""" - command = self._command or shutil.which(self.command_name) - if command: - return command - raise SigningError(f"Signing software missing ({self.package_type}): {self.command_name}") - - @property - def command_args(self) -> tuple: - return () - - @property - def package_type(self) -> str: - return self._package_type or self.ext - - @property - def pkg_files(self) -> tuple: - """Tuple of paths to package files to sign""" - # TODO?(phlax): check maintainer/packager field matches key id - return tuple( - os.path.join(self.path, filename) - for filename in 
os.listdir(self.path) - if filename.endswith(f".{self.ext}")) - - def sign(self) -> None: - """Sign the packages""" - for pkg in self.pkg_files: - self.sign_pkg(pkg) - - def sign_command(self, pkg_file: str) -> tuple: - """Tuple of command parts to sign a specific package""" - return (self.command,) + self.command_args + (pkg_file,) - - def sign_pkg(self, pkg_file: str) -> None: - """Sign a specific package file""" - pkg_name = os.path.basename(pkg_file) - self.log.notice(f"Sign package ({self.package_type}): {pkg_name}") - response = subprocess.run( - self.sign_command(pkg_file), capture_output=True, encoding="utf-8") - - if response.returncode: - raise SigningError(response.stdout + response.stderr) - - self.log.success(f"Signed package ({self.package_type}): {pkg_name}") - - -# Runner - - -class PackageSigningRunner(runner.Runner): - """For a given `package_type` and `path` this will run the relevant signing - util for the packages they contain. - """ - - _signing_utils = () - - @classmethod - def register_util(cls, name: str, util: Type[DirectorySigningUtil]) -> None: - """Register util for signing a package type""" - cls._signing_utils = getattr(cls, "_signing_utils") + ((name, util),) - - @property - def extract(self) -> bool: - return self.args.extract - - @cached_property - def maintainer(self) -> identity.GPGIdentity: - """A representation of the maintainer with GPG capabilities""" - return self.maintainer_class(self.maintainer_name, self.maintainer_email, self.log) - - @property - def maintainer_class(self) -> Type[identity.GPGIdentity]: - return identity.GPGIdentity - - @property - def maintainer_email(self) -> str: - """Email of the maintainer if set""" - return self.args.maintainer_email - - @property - def maintainer_name(self) -> str: - """Name of the maintainer if set""" - return self.args.maintainer_name - - @property - def package_type(self) -> str: - """Package type - eg deb/rpm""" - return self.args.package_type - - @property - def path(self) -> 
str: - """Path to the packages directory""" - return self.args.path - - @property - def tar(self) -> bool: - return self.args.tar - - @cached_property - def signing_utils(self) -> dict: - """Configured signing utils - eg `DebSigningUtil`, `RPMSigningUtil`""" - return dict(getattr(self, "_signing_utils")) - - def add_arguments(self, parser: argparse.ArgumentParser) -> None: - super().add_arguments(parser) - parser.add_argument( - "path", default="", help="Path to the directory containing packages to sign") - parser.add_argument( - "--extract", - action="store_true", - help= - "If set, treat the path as a tarball containing directories according to package_type") - parser.add_argument("--tar", help="Path to save the signed packages as tar file") - parser.add_argument( - "--type", - default="", - choices=[c for c in self.signing_utils] + [""], - help="Package type to sign") - parser.add_argument( - "--maintainer-name", - default="", - help="Maintainer name to match when searching for a GPG key to match with") - parser.add_argument( - "--maintainer-email", - default="", - help="Maintainer email to match when searching for a GPG key to match with") - - def archive(self, path: str) -> None: - with tarfile.open(self.tar, "w") as tar: - tar.add(path, arcname=".") - - def get_signing_util(self, package_type: str, path: str) -> DirectorySigningUtil: - return self.signing_utils[package_type](path, self.maintainer, self.log) - - @runner.catches((identity.GPGError, SigningError)) - def run(self) -> Optional[int]: - if self.extract: - self.sign_tarball() - else: - self.sign_directory() - self.log.success("Successfully signed packages") - - def sign(self, package_type: str, path: str) -> None: - self.log.notice(f"Signing {package_type}s ({self.maintainer}) {path}") - self.get_signing_util(package_type, path).sign() - - def sign_all(self, path: str) -> None: - for package_type in os.listdir(path): - if package_type in self.signing_utils: - target = os.path.join(path, package_type) 
- self.sign(package_type, target) - - def sign_directory(self) -> None: - self.sign(self.package_type, self.path) - if self.tar: - self.archive(self.path) - - def sign_tarball(self) -> None: - if not self.tar: - raise SigningError("You must set a `--tar` file to save to when `--extract` is set") - with utils.untar(self.path) as tardir: - self.sign_all(tardir) - self.archive(tardir) - - -# RPM - - -class RPMMacro(object): - """`.rpmmacros` configuration for rpmsign""" - - _macro_filename = ".rpmmacros" - - def __init__(self, home: str, overwrite: bool = False, **kwargs): - self.home = home - self.overwrite = bool(overwrite) - self.kwargs = kwargs - - @property - def path(self) -> str: - return os.path.join(self.home, self._macro_filename) - - @property - def macro(self) -> str: - macro = self.template - for k, v in self.kwargs.items(): - macro = macro.replace(f"__{k.upper()}__", v) - return macro - - @property - def template(self) -> str: - return RPMMACRO_TEMPLATE - - def write(self) -> None: - if not self.overwrite and os.path.exists(self.path): - return - with open(self.path, "w") as f: - f.write(self.macro) - - -class RPMSigningUtil(DirectorySigningUtil): - """Sign all RPM packages in a given directory""" - - command_name = "rpmsign" - ext = "rpm" - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.setup() - - @cached_property - def command(self) -> str: - if not os.path.basename(self.maintainer.gpg_bin) == "gpg2": - raise SigningError("GPG2 is required to sign RPM packages") - return super().command - - @cached_property - def command_args(self) -> tuple: - return ("--key-id", self.maintainer.fingerprint, "--addsign") - - @property - def rpmmacro(self) -> Type[RPMMacro]: - return RPMMacro - - def setup(self) -> None: - """Create the .rpmmacros file if it doesn't exist""" - self.rpmmacro( - self.maintainer.home, - maintainer=self.maintainer.name, - gpg_bin=self.maintainer.gpg_bin, - gpg_config=self.maintainer.gnupg_home).write() - 
- def sign_pkg(self, pkg_file: str) -> None: - os.chmod(pkg_file, 0o755) - super().sign_pkg(pkg_file) - - -# Deb - - -class DebChangesFiles(object): - """Creates a set of `changes` files for specific distros from a src - `changes` file. - - eg, if src changes file is `envoy_1.100.changes` and `Distribution:` - field is `buster bullseye`, it creates: - - `envoy_1.100.changes` -> `envoy_1.100.buster.changes` - `envoy_1.100.changes` -> `envoy_1.100.bullseye.changes` - - while replacing any instances of the original distribution name in - the respective changes files, eg: - - `buster bullseye` -> `buster` - `buster bullseye` -> `bullseye` - - finally, it removes the src changes file. - """ - - def __init__(self, src): - self.src = src - - def __iter__(self) -> Iterator[str]: - """Iterate the required changes files, creating them, yielding the paths - of the newly created files, and deleting the original - """ - for path in self.files: - yield path - os.unlink(self.src) - - @cached_property - def distributions(self) -> str: - """Find and parse the `Distributions` header in the `changes` file""" - with open(self.src) as f: - line = f.readline() - while line: - if not line.startswith("Distribution:"): - line = f.readline() - continue - return line.split(":")[1].strip() - raise SigningError(f"Did not find Distribution field in changes file {self.src}") - - @property - def files(self) -> Iterator[str]: - """Create changes files for each distro, yielding the paths""" - for distro in self.distributions.split(): - yield self.changes_file(distro) - - def changes_file(self, distro: str) -> str: - """Create a `changes` file for a specific distro""" - target = self.changes_file_path(distro) - with open(target, "w") as df: - with open(self.src) as f: - df.write(f.read().replace(self.distributions, distro)) - return target - - def changes_file_path(self, distro: str) -> str: - """Path to write the new changes file to""" - return ".".join([os.path.splitext(self.src)[0], distro, 
"changes"]) - - -class DebSigningUtil(DirectorySigningUtil): - """Sign all `changes` packages in a given directory - - the `.changes` spec allows a single `.changes` file to have multiple `Distributions` listed. - - but, most package repos require a single signed `.change` file per distribution, with only one - distribution listed. - - this extracts the `.changes` files to -> per-distro `filename.distro.changes`, and removes - the original, before signing the files. - """ - - command_name = "debsign" - ext = "changes" - _package_type = "deb" - - @cached_property - def command_args(self) -> tuple: - return ("-k", self.maintainer.fingerprint) - - @property - def changes_files(self) -> Type[DebChangesFiles]: - return DebChangesFiles - - @cached_property - def pkg_files(self) -> tuple: - """Mangled .changes paths""" - return tuple(chain.from_iterable(self.changes_files(src) for src in super().pkg_files)) - - -# Setup - - -def _register_utils() -> None: - PackageSigningRunner.register_util("deb", DebSigningUtil) - PackageSigningRunner.register_util("rpm", RPMSigningUtil) - - -def main(*args) -> int: - _register_utils() - return PackageSigningRunner(*args).run() - - -if __name__ == "__main__": - sys.exit(main(*sys.argv[1:])) diff --git a/tools/distribution/tests/test_sign.py b/tools/distribution/tests/test_sign.py deleted file mode 100644 index e2c80d8af5b36..0000000000000 --- a/tools/distribution/tests/test_sign.py +++ /dev/null @@ -1,1014 +0,0 @@ -import types -from unittest.mock import MagicMock, PropertyMock - -import pytest - -from tools.base import runner -from tools.distribution import sign -from tools.gpg import identity - - -# DirectorySigningUtil - -@pytest.mark.parametrize("command", ["", None, "COMMAND", "OTHERCOMMAND"]) -def test_util_constructor(command): - packager = sign.PackageSigningRunner("x", "y", "z") - maintainer = identity.GPGIdentity(packager) - args = ("PATH", maintainer, "LOG") - if command is not None: - args += (command, ) - util = 
sign.DirectorySigningUtil(*args) - assert util.path == "PATH" - assert util.maintainer == maintainer - assert util.log == "LOG" - assert util._command == (command or "") - assert util.command_args == () - - -@pytest.mark.parametrize("command_name", ["", None, "CMD", "OTHERCMD"]) -@pytest.mark.parametrize("command", ["", None, "COMMAND", "OTHERCOMMAND"]) -@pytest.mark.parametrize("which", ["", None, "PATH", "OTHERPATH"]) -def test_util_command(patches, command_name, command, which): - packager = sign.PackageSigningRunner("x", "y", "z") - maintainer = identity.GPGIdentity(packager) - util = sign.DirectorySigningUtil("PATH", maintainer, "LOG", command=command) - patched = patches( - "shutil", - ("DirectorySigningUtil.package_type", dict(new_callable=PropertyMock)), - prefix="tools.distribution.sign") - if command_name is not None: - util.command_name = command_name - - with patched as (m_shutil, m_type): - m_shutil.which.return_value = which - - if not which and not command: - with pytest.raises(sign.SigningError) as e: - util.command - - assert ( - list(m_shutil.which.call_args) - == [(command_name,), {}]) - assert ( - e.value.args[0] - == f"Signing software missing ({m_type.return_value}): {command_name}") - return - - result = util.command - - assert "command" in util.__dict__ - assert not m_type.called - - if command: - assert not m_shutil.which.called - assert result == command - return - - assert ( - list(m_shutil.which.call_args) - == [(command_name,), {}]) - assert result == m_shutil.which.return_value - - -def test_util_sign(patches): - packager = sign.PackageSigningRunner("x", "y", "z") - maintainer = identity.GPGIdentity(packager) - util = sign.DirectorySigningUtil("PATH", maintainer, "LOG") - patched = patches( - "DirectorySigningUtil.sign_pkg", - ("DirectorySigningUtil.pkg_files", dict(new_callable=PropertyMock)), - prefix="tools.distribution.sign") - - with patched as (m_sign, m_pkgs): - m_pkgs.return_value = ("PKG1", "PKG2", "PKG3") - assert not 
util.sign() - - assert ( - list(list(c) for c in m_sign.call_args_list) - == [[('PKG1',), {}], - [('PKG2',), {}], - [('PKG3',), {}]]) - - -def test_util_sign_command(patches): - packager = sign.PackageSigningRunner("x", "y", "z") - maintainer = identity.GPGIdentity(packager) - util = sign.DirectorySigningUtil("PATH", maintainer, "LOG") - patched = patches( - ("DirectorySigningUtil.command", dict(new_callable=PropertyMock)), - ("DirectorySigningUtil.command_args", dict(new_callable=PropertyMock)), - prefix="tools.distribution.sign") - - with patched as (m_command, m_args): - m_args.return_value = ("ARG1", "ARG2", "ARG3") - assert ( - util.sign_command("PACKAGE") - == (m_command.return_value, ) + m_args.return_value + ("PACKAGE", )) - - -@pytest.mark.parametrize("returncode", [0, 1]) -def test_util_sign_pkg(patches, returncode): - packager = sign.PackageSigningRunner("x", "y", "z") - maintainer = identity.GPGIdentity(packager) - util = sign.DirectorySigningUtil("PATH", maintainer, "LOG") - patched = patches( - "os", - "subprocess", - "DirectorySigningUtil.sign_command", - ("PackageSigningRunner.log", dict(new_callable=PropertyMock)), - ("DirectorySigningUtil.package_type", dict(new_callable=PropertyMock)), - prefix="tools.distribution.sign") - - util.log = MagicMock() - - with patched as (m_os, m_subproc, m_command, m_log, m_type): - m_subproc.run.return_value.returncode = returncode - if returncode: - with pytest.raises(sign.SigningError) as e: - util.sign_pkg("PACKAGE") - else: - assert not util.sign_pkg("PACKAGE") - - assert ( - list(m_os.path.basename.call_args) - == [('PACKAGE',), {}]) - assert ( - list(util.log.notice.call_args) - == [(f"Sign package ({m_type.return_value}): {m_os.path.basename.return_value}",), {}]) - assert ( - list(m_command.call_args) - == [('PACKAGE',), {}]) - assert ( - list(m_subproc.run.call_args) - == [(m_command.return_value,), - {'capture_output': True, - 'encoding': 'utf-8'}]) - - if not returncode: - assert ( - 
list(util.log.success.call_args) - == [(f"Signed package ({m_type.return_value}): {m_os.path.basename.return_value}",), {}]) - return - assert e.value.args[0] == m_subproc.run.return_value.stdout + m_subproc.run.return_value.stderr - - -@pytest.mark.parametrize("ext", ["EXT1", "EXT2"]) -@pytest.mark.parametrize("package_type", [None, "", "TYPE1", "TYPE2"]) -def test_util_package_type(ext, package_type): - packager = sign.PackageSigningRunner("x", "y", "z") - maintainer = identity.GPGIdentity(packager) - util = sign.DirectorySigningUtil("PATH", maintainer, "LOG") - util.ext = ext - util._package_type = package_type - assert util.package_type == package_type or ext - - -@pytest.mark.parametrize( - "files", - [[], - ["abc", "xyz"], - ["abc.EXT", "xyz.EXT", "abc.FOO", "abc.BAR"], - ["abc.NOTEXT", "xyz.NOTEXT"]]) -def test_util_pkg_files(patches, files): - packager = sign.PackageSigningRunner("x", "y", "z") - maintainer = identity.GPGIdentity(packager) - util = sign.DirectorySigningUtil("PATH", maintainer, "LOG") - patched = patches( - "os", - ("DirectorySigningUtil.ext", dict(new_callable=PropertyMock)), - prefix="tools.distribution.sign") - with patched as (m_os, m_ext): - m_ext.return_value = "EXT" - m_os.listdir.return_value = files - result = util.pkg_files - - expected = [fname for fname in files if fname.endswith(".EXT")] - - assert ( - list(m_os.listdir.call_args) - == [("PATH",), {}]) - if not expected: - assert not m_os.path.join.called - assert not result - else: - assert ( - result - == tuple( - m_os.path.join.return_value - for fname in expected)) - assert ( - list(list(c) for c in m_os.path.join.call_args_list) - == [[("PATH", fname), {}] - for fname in expected]) - - assert "pkg_files" not in util.__dict__ - - -# PackageSigningRunner - -def test_packager_constructor(): - packager = sign.PackageSigningRunner("x", "y", "z") - assert isinstance(packager, runner.Runner) - assert packager.maintainer_class == identity.GPGIdentity - assert 
packager._signing_utils == () - - -def test_packager_cls_register_util(): - assert sign.PackageSigningRunner._signing_utils == () - - class Util1(object): - pass - - class Util2(object): - pass - - sign.PackageSigningRunner.register_util("util1", Util1) - assert ( - sign.PackageSigningRunner._signing_utils - == (('util1', Util1),)) - - sign.PackageSigningRunner.register_util("util2", Util2) - assert ( - sign.PackageSigningRunner._signing_utils - == (('util1', Util1), - ('util2', Util2),)) - - -def test_packager_extract(patches): - packager = sign.PackageSigningRunner("x", "y", "z") - patched = patches( - ("PackageSigningRunner.args", dict(new_callable=PropertyMock)), - prefix="tools.distribution.sign") - - with patched as (m_args, ): - assert packager.extract == m_args.return_value.extract - - assert "extract" not in packager.__dict__ - - -def test_packager_maintainer(patches): - packager = sign.PackageSigningRunner("x", "y", "z") - patched = patches( - ("PackageSigningRunner.log", dict(new_callable=PropertyMock)), - ("PackageSigningRunner.maintainer_class", dict(new_callable=PropertyMock)), - ("PackageSigningRunner.maintainer_email", dict(new_callable=PropertyMock)), - ("PackageSigningRunner.maintainer_name", dict(new_callable=PropertyMock)), - prefix="tools.distribution.sign") - - with patched as (m_log, m_class, m_email, m_name): - assert packager.maintainer == m_class.return_value.return_value - - assert ( - list(m_class.return_value.call_args) - == [(m_name.return_value, m_email.return_value, m_log.return_value), {}]) - - assert "maintainer" in packager.__dict__ - - -def test_packager_maintainer_email(patches): - packager = sign.PackageSigningRunner("x", "y", "z") - patched = patches( - ("PackageSigningRunner.args", dict(new_callable=PropertyMock)), - prefix="tools.distribution.sign") - - with patched as (m_args, ): - assert packager.maintainer_email == m_args.return_value.maintainer_email - - assert "maintainer_email" not in packager.__dict__ - - -def 
test_packager_maintainer_name(patches): - packager = sign.PackageSigningRunner("x", "y", "z") - - patched = patches( - ("PackageSigningRunner.args", dict(new_callable=PropertyMock)), - prefix="tools.distribution.sign") - - with patched as (m_args, ): - assert packager.maintainer_name == m_args.return_value.maintainer_name - - assert "maintainer_name" not in packager.__dict__ - - -def test_packager_package_type(patches): - packager = sign.PackageSigningRunner("x", "y", "z") - - patched = patches( - ("PackageSigningRunner.args", dict(new_callable=PropertyMock)), - prefix="tools.distribution.sign") - - with patched as (m_args, ): - assert packager.package_type == m_args.return_value.package_type - - assert "package_type" not in packager.__dict__ - - -def test_packager_path(patches): - packager = sign.PackageSigningRunner("x", "y", "z") - - patched = patches( - ("PackageSigningRunner.args", dict(new_callable=PropertyMock)), - prefix="tools.distribution.sign") - - with patched as (m_args, ): - assert packager.path == m_args.return_value.path - - assert "path" not in packager.__dict__ - - -def test_packager_tar(patches): - packager = sign.PackageSigningRunner("x", "y", "z") - patched = patches( - ("PackageSigningRunner.args", dict(new_callable=PropertyMock)), - prefix="tools.distribution.sign") - - with patched as (m_args, ): - assert packager.tar == m_args.return_value.tar - - assert "tar" not in packager.__dict__ - - -def test_packager_signing_utils(): - packager = sign.PackageSigningRunner("x", "y", "z") - _utils = (("NAME1", "UTIL1"), ("NAME2", "UTIL2")) - packager._signing_utils = _utils - assert packager.signing_utils == dict(_utils) - - -def test_packager_add_arguments(): - packager = sign.PackageSigningRunner("x", "y", "z") - parser = MagicMock() - packager.add_arguments(parser) - assert ( - list(list(c) for c in parser.add_argument.call_args_list) - == [[('--log-level', '-l'), - {'choices': ['debug', 'info', 'warn', 'error'], - 'default': 'info', - 'help': 'Log 
level to display'}], - [('path',), - {'default': '', - 'help': 'Path to the directory containing packages to sign'}], - [('--extract',), - {'action': 'store_true', - 'help': 'If set, treat the path as a tarball containing directories ' - 'according to package_type'}], - [('--tar',), - {'help': 'Path to save the signed packages as tar file'}], - [('--type',), - {'choices': ['util1', 'util2', ''], - 'default': '', - 'help': 'Package type to sign'}], - [('--maintainer-name',), - {'default': '', 'help': 'Maintainer name to match when searching for a GPG key to match with'}], - [('--maintainer-email',), - {'default': '', - 'help': 'Maintainer email to match when searching for a GPG key to match with'}]]) - - -def test_packager_archive(patches): - packager = sign.PackageSigningRunner("x", "y", "z") - patched = patches( - "tarfile", - ("PackageSigningRunner.tar", dict(new_callable=PropertyMock)), - prefix="tools.distribution.sign") - - with patched as (m_tarfile, m_tar): - assert not packager.archive("PATH") - - assert ( - list(m_tarfile.open.call_args) - == [(m_tar.return_value, 'w'), {}]) - assert ( - list(m_tarfile.open.return_value.__enter__.return_value.add.call_args) - == [('PATH',), {'arcname': '.'}]) - - -def test_packager_get_signing_util(patches): - packager = sign.PackageSigningRunner("x", "y", "z") - patched = patches( - ("PackageSigningRunner.log", dict(new_callable=PropertyMock)), - ("PackageSigningRunner.maintainer", dict(new_callable=PropertyMock)), - ("PackageSigningRunner.signing_utils", dict(new_callable=PropertyMock)), - prefix="tools.distribution.sign") - - with patched as (m_log, m_maintainer, m_utils): - assert packager.get_signing_util("UTIL", "PATH") == m_utils.return_value.__getitem__.return_value.return_value - - assert ( - list(m_utils.return_value.__getitem__.call_args) - == [("UTIL",), {}]) - assert ( - list(m_utils.return_value.__getitem__.return_value.call_args) - == [("PATH", m_maintainer.return_value, m_log.return_value), {}]) - - 
-@pytest.mark.parametrize("extract", [True, False]) -@pytest.mark.parametrize("raises", [None, Exception, identity.GPGError, sign.SigningError]) -def test_packager_run(patches, extract, raises): - packager = sign.PackageSigningRunner("x", "y", "z") - patched = patches( - "PackageSigningRunner.sign_tarball", - "PackageSigningRunner.sign_directory", - ("PackageSigningRunner.extract", dict(new_callable=PropertyMock)), - ("PackageSigningRunner.log", dict(new_callable=PropertyMock)), - prefix="tools.distribution.sign") - - with patched as (m_tarb, m_dir, m_extract, m_log): - m_extract.return_value = extract - if raises: - _error = raises("AN ERROR OCCURRED") - m_extract.side_effect = _error - - if raises == Exception: - with pytest.raises(raises): - packager.run() - else: - assert packager.run() == (1 if raises else None) - - if raises: - assert not m_tarb.called - assert not m_dir.called - assert not m_log.return_value.success.called - - if raises == Exception: - return - assert ( - list(m_log.return_value.error.call_args) - == [(str(_error),), {}]) - return - - assert ( - list(m_log.return_value.success.call_args) - == [('Successfully signed packages',), {}]) - - if extract: - assert ( - list(m_tarb.call_args) - == [(), {}]) - assert not m_dir.called - return - assert not m_tarb.called - assert ( - list(m_dir.call_args) - == [(), {}]) - - -def test_packager_sign(patches): - packager = sign.PackageSigningRunner("x", "y", "z") - patched = patches( - "PackageSigningRunner.get_signing_util", - ("PackageSigningRunner.log", dict(new_callable=PropertyMock)), - ("PackageSigningRunner.maintainer", dict(new_callable=PropertyMock)), - prefix="tools.distribution.sign") - - with patched as (m_util, m_log, m_maintainer): - assert not packager.sign("PACKAGE_TYPE", "PATH") - - assert ( - list(m_log.return_value.notice.call_args) - == [(f"Signing PACKAGE_TYPEs ({m_maintainer.return_value}) PATH",), {}]) - assert ( - list(m_util.call_args) - == [('PACKAGE_TYPE', 'PATH'), {}]) - assert 
( - list(m_util.return_value.sign.call_args) - == [(), {}]) - - -@pytest.mark.parametrize("utils", [[], ["a", "b", "c"]]) -@pytest.mark.parametrize("listdir", [[], ["a", "b"], ["b", "c"], ["c", "d"]]) -def test_packager_sign_all(patches, listdir, utils): - packager = sign.PackageSigningRunner("x", "y", "z") - patched = patches( - "os", - "PackageSigningRunner.sign", - ("PackageSigningRunner.signing_utils", dict(new_callable=PropertyMock)), - prefix="tools.distribution.sign") - - with patched as (m_os, m_sign, m_utils): - m_os.listdir.return_value = listdir - m_utils.return_value = utils - assert not packager.sign_all("PATH") - assert ( - list(m_os.listdir.call_args) - == [('PATH',), {}]) - expected = [x for x in listdir if x in utils] - assert ( - list(list(c) for c in m_os.path.join.call_args_list) - == [[('PATH', k), {}] for k in expected]) - assert ( - list(list(c) for c in m_sign.call_args_list) - == [[(k, m_os.path.join.return_value), {}] for k in expected]) - - -@pytest.mark.parametrize("tar", [True, False]) -def test_packager_sign_directory(patches, tar): - packager = sign.PackageSigningRunner("x", "y", "z") - patched = patches( - "PackageSigningRunner.archive", - "PackageSigningRunner.sign", - ("PackageSigningRunner.package_type", dict(new_callable=PropertyMock)), - ("PackageSigningRunner.path", dict(new_callable=PropertyMock)), - ("PackageSigningRunner.tar", dict(new_callable=PropertyMock)), - prefix="tools.distribution.sign") - - with patched as (m_archive, m_sign, m_type, m_path, m_tar): - m_tar.return_value = tar - assert not packager.sign_directory() - - assert ( - list(m_sign.call_args) - == [(m_type.return_value, m_path.return_value), {}]) - if not tar: - assert not m_archive.called - return - - assert ( - list(m_archive.call_args) - == [(m_path.return_value, ), {}]) - - -@pytest.mark.parametrize("tar", [True, False]) -def test_packager_sign_tarball(patches, tar): - packager = sign.PackageSigningRunner("x", "y", "z") - patched = patches( - "utils", - 
"PackageSigningRunner.archive", - "PackageSigningRunner.sign_all", - ("PackageSigningRunner.path", dict(new_callable=PropertyMock)), - ("PackageSigningRunner.tar", dict(new_callable=PropertyMock)), - prefix="tools.distribution.sign") - - with patched as (m_utils, m_archive, m_sign, m_path, m_tar): - m_tar.return_value = tar - if not tar: - with pytest.raises(sign.SigningError) as e: - packager.sign_tarball() - else: - assert not packager.sign_tarball() - - if not tar: - assert ( - e.value.args[0] - == 'You must set a `--tar` file to save to when `--extract` is set') - assert not m_utils.untar.called - assert not m_sign.called - assert not m_archive.called - return - - assert ( - list(m_utils.untar.call_args) - == [(m_path.return_value,), {}]) - assert ( - list(m_sign.call_args) - == [(m_utils.untar.return_value.__enter__.return_value,), {}]) - assert ( - list(m_archive.call_args) - == [(m_utils.untar.return_value.__enter__.return_value,), {}]) - - -# RPMMacro - -@pytest.mark.parametrize("overwrite", [[], None, True, False]) -@pytest.mark.parametrize("kwargs", [{}, dict(K1="V1", K2="V2")]) -def test_rpmmacro_constructor(patches, overwrite, kwargs): - rpmmacro = ( - sign.RPMMacro("HOME", overwrite=overwrite, **kwargs) - if overwrite != [] - else sign.RPMMacro("HOME", **kwargs)) - assert rpmmacro._macro_filename == ".rpmmacros" - assert rpmmacro.home == "HOME" - assert rpmmacro.overwrite == bool(overwrite or False) - assert rpmmacro.kwargs == kwargs - assert rpmmacro.template == sign.RPMMACRO_TEMPLATE - - -def test_rpmmacro_path(patches): - rpmmacro = sign.RPMMacro("HOME") - patched = patches( - "os", - prefix="tools.distribution.sign") - with patched as (m_os, ): - assert rpmmacro.path == m_os.path.join.return_value - - assert ( - list(m_os.path.join.call_args) - == [('HOME', rpmmacro._macro_filename), {}]) - - -@pytest.mark.parametrize("kwargs", [{}, dict(K1="V1", K2="V2")]) -def test_rpmmacro_macro(patches, kwargs): - rpmmacro = sign.RPMMacro("HOME", **kwargs) - 
patched = patches( - ("RPMMacro.template", dict(new_callable=PropertyMock)), - prefix="tools.distribution.sign") - with patched as (m_template, ): - result = rpmmacro.macro - - expected = m_template.return_value - for k, v in kwargs.items(): - assert ( - list(expected.replace.call_args) - == [(f"__{k.upper()}__", v), {}]) - expected = expected.replace.return_value - - assert result == expected - assert "macro" not in rpmmacro.__dict__ - - -@pytest.mark.parametrize("overwrite", [True, False]) -@pytest.mark.parametrize("exists", [True, False]) -def test_rpmmacro_write(patches, overwrite, exists): - rpmmacro = sign.RPMMacro("HOME") - patched = patches( - "open", - "os", - ("RPMMacro.macro", dict(new_callable=PropertyMock)), - ("RPMMacro.path", dict(new_callable=PropertyMock)), - prefix="tools.distribution.sign") - rpmmacro.overwrite = overwrite - - with patched as (m_open, m_os, m_macro, m_path): - m_os.path.exists.return_value = exists - assert not rpmmacro.write() - - if not overwrite: - assert ( - list(m_os.path.exists.call_args) - == [(m_path.return_value,), {}]) - else: - assert not m_os.path.join.called - assert not m_os.exists.join.called - - if not overwrite and exists: - assert not m_open.called - return - - assert ( - list(m_open.call_args) - == [(m_path.return_value, 'w'), {}]) - assert ( - list(m_open.return_value.__enter__.return_value.write.call_args) - == [(m_macro.return_value,), {}]) - - -# RPMSigningUtil - -@pytest.mark.parametrize("args", [(), ("ARG1", "ARG2")]) -@pytest.mark.parametrize("kwargs", [{}, dict(K1="V1", K2="V2")]) -def test_rpmsign_constructor(patches, args, kwargs): - packager = sign.PackageSigningRunner("x", "y", "z") - maintainer = identity.GPGIdentity(packager) - patched = patches( - "RPMSigningUtil.setup", - "DirectorySigningUtil.__init__", - prefix="tools.distribution.sign") - - with patched as (m_setup, m_super): - rpmsign = sign.RPMSigningUtil("PATH", maintainer, *args, **kwargs) - - assert isinstance(rpmsign, 
sign.DirectorySigningUtil) - assert rpmsign.ext == "rpm" - assert rpmsign.command_name == "rpmsign" - assert ( - list(m_setup.call_args) - == [(), {}]) - assert ( - list(m_super.call_args) - == [('PATH', maintainer) + args, kwargs]) - assert rpmsign.rpmmacro == sign.RPMMacro - - -@pytest.mark.parametrize("gpg2", [True, False]) -def test_rpmsign_command(patches, gpg2): - maintainer = identity.GPGIdentity() - patched = patches( - "os", - "RPMSigningUtil.__init__", - ("DirectorySigningUtil.command", dict(new_callable=PropertyMock)), - ("identity.GPGIdentity.gpg_bin", dict(new_callable=PropertyMock)), - prefix="tools.distribution.sign") - - with patched as (m_os, m_init, m_super, m_gpg): - m_os.path.basename.return_value = "gpg2" if gpg2 else "notgpg2" - m_init.return_value = None - rpmsign = sign.RPMSigningUtil("PATH", maintainer, "LOG") - rpmsign.maintainer = maintainer - - if gpg2: - assert rpmsign.command == m_super.return_value - else: - with pytest.raises(sign.SigningError) as e: - rpmsign.command - - assert ( - e.value.args[0] - == 'GPG2 is required to sign RPM packages') - - assert ( - list(m_os.path.basename.call_args) - == [(m_gpg.return_value,), {}]) - if gpg2: - assert "command" in rpmsign.__dict__ - else: - assert "command" not in rpmsign.__dict__ - - -def test_rpmsign_command_args(patches): - packager = sign.PackageSigningRunner("x", "y", "z") - maintainer = identity.GPGIdentity(packager) - patched = patches( - "RPMSigningUtil.setup", - ("identity.GPGIdentity.fingerprint", dict(new_callable=PropertyMock)), - prefix="tools.distribution.sign") - - with patched as (m_setup, m_fingerprint): - rpmsign = sign.RPMSigningUtil("PATH", maintainer, "LOG") - assert ( - rpmsign.command_args - == ("--key-id", m_fingerprint.return_value, - "--addsign")) - - assert "command_args" in rpmsign.__dict__ - - -class DummyRPMSigningUtil(sign.RPMSigningUtil): - - def __init__(self, path, maintainer): - self.path = path - self.maintainer = maintainer - - -def 
test_rpmsign_setup(patches): - packager = sign.PackageSigningRunner("x", "y", "z") - maintainer = MagicMock() - - rpmsign = DummyRPMSigningUtil("PATH", maintainer) - - patched = patches( - ("RPMSigningUtil.rpmmacro", dict(new_callable=PropertyMock)), - prefix="tools.distribution.sign") - - with patched as (m_macro, ): - assert not rpmsign.setup() - - assert ( - list(m_macro.return_value.call_args) - == [(maintainer.home,), - {'maintainer': maintainer.name, - 'gpg_bin': maintainer.gpg_bin, - 'gpg_config': maintainer.gnupg_home}]) - - -def test_rpmsign_sign_pkg(patches): - packager = sign.PackageSigningRunner("x", "y", "z") - maintainer = identity.GPGIdentity(packager) - rpmsign = DummyRPMSigningUtil("PATH", maintainer) - patched = patches( - "os", - "DirectorySigningUtil.sign_pkg", - prefix="tools.distribution.sign") - - with patched as (m_os, m_sign): - assert not rpmsign.sign_pkg("FILE") - - assert ( - list(m_os.chmod.call_args) - == [('FILE', 0o755), {}]) - assert ( - list(m_sign.call_args) - == [('FILE',), {}]) - - -# DebChangesFiles - -def test_changes_constructor(): - changes = sign.DebChangesFiles("SRC") - assert changes.src == "SRC" - - -def test_changes_dunder_iter(patches): - changes = sign.DebChangesFiles("SRC") - - patched = patches( - "os", - ("DebChangesFiles.files", dict(new_callable=PropertyMock)), - prefix="tools.distribution.sign") - _files = ["FILE1", "FILE2", "FILE3"] - - with patched as (m_os, m_files): - m_files.return_value = _files - result = changes.__iter__() - assert list(result) == _files - - assert isinstance(result, types.GeneratorType) - assert ( - list(m_os.unlink.call_args) - == [('SRC',), {}]) - - -@pytest.mark.parametrize( - "lines", - [([], None), - (["FOO", "BAR"], None), - (["FOO", "BAR", "Distribution: distro1"], "distro1"), - (["FOO", "BAR", "Distribution: distro1 distro2"], "distro1 distro2"), - (["FOO", "BAR", "Distribution: distro1 distro2", "BAZ"], "distro1 distro2"), - (["FOO", "BAR", "", "Distribution: distro1 distro2"], 
None)]) -def test_changes_distributions(patches, lines): - lines, expected = lines - changes = sign.DebChangesFiles("SRC") - patched = patches( - "open", - prefix="tools.distribution.sign") - - class DummyFile(object): - line = 0 - - def __init__(self, lines): - self.lines = lines - - def readline(self): - if len(self.lines) > self.line: - line = self.lines[self.line] - self.line += 1 - return line - - _file = DummyFile(lines) - - with patched as (m_open, ): - m_open.return_value.__enter__.return_value.readline.side_effect = _file.readline - if expected: - assert changes.distributions == expected - else: - with pytest.raises(sign.SigningError) as e: - changes.distributions - assert ( - e.value.args[0] - == "Did not find Distribution field in changes file SRC") - - if "" in lines: - lines = lines[:lines.index("")] - - if expected: - breakon = 0 - for line in lines: - if line.startswith("Distribution:"): - break - breakon += 1 - lines = lines[:breakon] - count = len(lines) + 1 - assert ( - list(list(c) for c in m_open.return_value.__enter__.return_value.readline.call_args_list) - == [[(), {}]] * count) - - -def test_changes_files(patches): - changes = sign.DebChangesFiles("SRC") - - patched = patches( - "DebChangesFiles.changes_file", - ("DebChangesFiles.distributions", dict(new_callable=PropertyMock)), - prefix="tools.distribution.sign") - - with patched as (m_changes, m_distros): - m_distros.return_value = "DISTRO1 DISTRO2 DISTRO3" - result = changes.files - assert list(result) == [m_changes.return_value] * 3 - - assert isinstance(result, types.GeneratorType) - assert ( - list(list(c) for c in m_changes.call_args_list) - == [[('DISTRO1',), {}], - [('DISTRO2',), {}], - [('DISTRO3',), {}]]) - - -def test_changes_changes_file(patches): - changes = sign.DebChangesFiles("SRC") - patched = patches( - "open", - "DebChangesFiles.changes_file_path", - ("DebChangesFiles.distributions", dict(new_callable=PropertyMock)), - prefix="tools.distribution.sign") - - with patched as 
(m_open, m_path, m_distros): - assert ( - changes.changes_file("DISTRO") - == m_path.return_value) - - assert ( - list(m_path.call_args) - == [('DISTRO',), {}]) - assert ( - list(list(c) for c in m_open.call_args_list) - == [[(m_path.return_value, 'w'), {}], - [('SRC',), {}]]) - assert ( - list(m_open.return_value.__enter__.return_value.write.call_args) - == [(m_open.return_value.__enter__.return_value.read.return_value.replace.return_value,), {}]) - assert ( - list(m_open.return_value.__enter__.return_value.read.call_args) - == [(), {}]) - assert ( - list(m_open.return_value.__enter__.return_value.read.return_value.replace.call_args) - == [(m_distros.return_value, 'DISTRO'), {}]) - - -@pytest.mark.parametrize( - "path", - [("SRC", "SRC.DISTRO.changes"), - ("SRC.changes", "SRC.DISTRO.changes"), - ("SRC.FOO.BAR.changes", "SRC.FOO.BAR.DISTRO.changes")]) -def test_changes_file_path(path): - path, expected = path - changes = sign.DebChangesFiles(path) - assert changes.changes_file_path("DISTRO") == expected - - -# DebSigningUtil - -@pytest.mark.parametrize("args", [(), ("ARG1", ), ("ARG2", )]) -def test_debsign_constructor(patches, args): - packager = sign.PackageSigningRunner("x", "y", "z") - maintainer = identity.GPGIdentity(packager) - debsign = sign.DebSigningUtil("PATH", maintainer, "LOG", *args) - - assert isinstance(debsign, sign.DirectorySigningUtil) - assert debsign.ext == "changes" - assert debsign.command_name == "debsign" - assert debsign._package_type == "deb" - assert debsign.changes_files == sign.DebChangesFiles - assert debsign.path == "PATH" - assert debsign.maintainer == maintainer - assert debsign.log == "LOG" - - -def test_debsign_command_args(patches): - packager = sign.PackageSigningRunner("x", "y", "z") - maintainer = identity.GPGIdentity(packager) - patched = patches( - ("identity.GPGIdentity.fingerprint", dict(new_callable=PropertyMock)), - prefix="tools.distribution.sign") - - with patched as (m_fingerprint, ): - debsign = 
sign.DebSigningUtil("PATH", maintainer, "LOG") - assert ( - debsign.command_args - == ("-k", m_fingerprint.return_value)) - - assert "command_args" in debsign.__dict__ - - -def test_debsign_pkg_files(patches): - packager = sign.PackageSigningRunner("x", "y", "z") - maintainer = identity.GPGIdentity(packager) - debsign = sign.DebSigningUtil("PATH", maintainer, "LOG") - patched = patches( - "chain", - ("DirectorySigningUtil.pkg_files", dict(new_callable=PropertyMock)), - ("DebSigningUtil.changes_files", dict(new_callable=PropertyMock)), - prefix="tools.distribution.sign") - - with patched as (m_chain, m_pkg, m_changes): - m_pkg.return_value = ("FILE1", "FILE2", "FILE3") - m_chain.from_iterable.side_effect = lambda _iter: list(_iter) - assert ( - debsign.pkg_files - == (m_changes.return_value.return_value, ) * 3) - - assert m_chain.from_iterable.called - assert ( - list(list(c) for c in m_changes.return_value.call_args_list) - == [[('FILE1',), {}], [('FILE2',), {}], [('FILE3',), {}]]) - - -# Module - -def test_sign_main(patches, command_main): - patched = patches( - "_register_utils", - prefix="tools.distribution.sign") - - with patched as (m_reg, ): - command_main( - sign.main, - "tools.distribution.sign.PackageSigningRunner") - - assert ( - list(m_reg.call_args) - == [(), {}]) - - -def test_sign_register_utils(patches, command_main): - patched = patches( - "PackageSigningRunner.register_util", - prefix="tools.distribution.sign") - - with patched as (m_reg, ): - sign._register_utils() - - assert ( - list(list(c) for c in m_reg.call_args_list) - == [[('deb', sign.DebSigningUtil), {}], - [('rpm', sign.RPMSigningUtil), {}]]) diff --git a/tools/docker/BUILD b/tools/docker/BUILD deleted file mode 100644 index 9fd20eedd3b1f..0000000000000 --- a/tools/docker/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -load("//bazel:envoy_build_system.bzl", "envoy_package") -load("@docker_pip3//:requirements.bzl", "requirement") -load("//tools/base:envoy_python.bzl", "envoy_py_library") - 
-licenses(["notice"]) # Apache 2 - -envoy_package() - -envoy_py_library( - name = "tools.docker.utils", - deps = [ - requirement("aiodocker"), - ], -) diff --git a/tools/docker/requirements.txt b/tools/docker/requirements.txt deleted file mode 100644 index 6e65c176c2389..0000000000000 --- a/tools/docker/requirements.txt +++ /dev/null @@ -1,152 +0,0 @@ -# -# This file is autogenerated by pip-compile -# To update, run: -# -# pip-compile --generate-hashes tools/docker/requirements.txt -# -aiodocker==0.19.1 \ - --hash=sha256:59dfae91b5acbfa953baf4a3553b7c5ff375346b0f3bbfd8cae11c3b93adce04 \ - --hash=sha256:bfbb44dbee185dbc8943be68d1f51358af3ec473c463bdee68a25e33d70ae3ad - # via -r tools/docker/requirements.txt -aiohttp==3.7.4.post0 \ - --hash=sha256:02f46fc0e3c5ac58b80d4d56eb0a7c7d97fcef69ace9326289fb9f1955e65cfe \ - --hash=sha256:0563c1b3826945eecd62186f3f5c7d31abb7391fedc893b7e2b26303b5a9f3fe \ - --hash=sha256:114b281e4d68302a324dd33abb04778e8557d88947875cbf4e842c2c01a030c5 \ - --hash=sha256:14762875b22d0055f05d12abc7f7d61d5fd4fe4642ce1a249abdf8c700bf1fd8 \ - --hash=sha256:15492a6368d985b76a2a5fdd2166cddfea5d24e69eefed4630cbaae5c81d89bd \ - --hash=sha256:17c073de315745a1510393a96e680d20af8e67e324f70b42accbd4cb3315c9fb \ - --hash=sha256:209b4a8ee987eccc91e2bd3ac36adee0e53a5970b8ac52c273f7f8fd4872c94c \ - --hash=sha256:230a8f7e24298dea47659251abc0fd8b3c4e38a664c59d4b89cca7f6c09c9e87 \ - --hash=sha256:2e19413bf84934d651344783c9f5e22dee452e251cfd220ebadbed2d9931dbf0 \ - --hash=sha256:393f389841e8f2dfc86f774ad22f00923fdee66d238af89b70ea314c4aefd290 \ - --hash=sha256:3cf75f7cdc2397ed4442594b935a11ed5569961333d49b7539ea741be2cc79d5 \ - --hash=sha256:3d78619672183be860b96ed96f533046ec97ca067fd46ac1f6a09cd9b7484287 \ - --hash=sha256:40eced07f07a9e60e825554a31f923e8d3997cfc7fb31dbc1328c70826e04cde \ - --hash=sha256:493d3299ebe5f5a7c66b9819eacdcfbbaaf1a8e84911ddffcdc48888497afecf \ - --hash=sha256:4b302b45040890cea949ad092479e01ba25911a15e648429c7c5aae9650c67a8 \ - 
--hash=sha256:515dfef7f869a0feb2afee66b957cc7bbe9ad0cdee45aec7fdc623f4ecd4fb16 \ - --hash=sha256:547da6cacac20666422d4882cfcd51298d45f7ccb60a04ec27424d2f36ba3eaf \ - --hash=sha256:5df68496d19f849921f05f14f31bd6ef53ad4b00245da3195048c69934521809 \ - --hash=sha256:64322071e046020e8797117b3658b9c2f80e3267daec409b350b6a7a05041213 \ - --hash=sha256:7615dab56bb07bff74bc865307aeb89a8bfd9941d2ef9d817b9436da3a0ea54f \ - --hash=sha256:79ebfc238612123a713a457d92afb4096e2148be17df6c50fb9bf7a81c2f8013 \ - --hash=sha256:7b18b97cf8ee5452fa5f4e3af95d01d84d86d32c5e2bfa260cf041749d66360b \ - --hash=sha256:932bb1ea39a54e9ea27fc9232163059a0b8855256f4052e776357ad9add6f1c9 \ - --hash=sha256:a00bb73540af068ca7390e636c01cbc4f644961896fa9363154ff43fd37af2f5 \ - --hash=sha256:a5ca29ee66f8343ed336816c553e82d6cade48a3ad702b9ffa6125d187e2dedb \ - --hash=sha256:af9aa9ef5ba1fd5b8c948bb11f44891968ab30356d65fd0cc6707d989cd521df \ - --hash=sha256:bb437315738aa441251214dad17428cafda9cdc9729499f1d6001748e1d432f4 \ - --hash=sha256:bdb230b4943891321e06fc7def63c7aace16095be7d9cf3b1e01be2f10fba439 \ - --hash=sha256:c6e9dcb4cb338d91a73f178d866d051efe7c62a7166653a91e7d9fb18274058f \ - --hash=sha256:cffe3ab27871bc3ea47df5d8f7013945712c46a3cc5a95b6bee15887f1675c22 \ - --hash=sha256:d012ad7911653a906425d8473a1465caa9f8dea7fcf07b6d870397b774ea7c0f \ - --hash=sha256:d9e13b33afd39ddeb377eff2c1c4f00544e191e1d1dee5b6c51ddee8ea6f0cf5 \ - --hash=sha256:e4b2b334e68b18ac9817d828ba44d8fcb391f6acb398bcc5062b14b2cbeac970 \ - --hash=sha256:e54962802d4b8b18b6207d4a927032826af39395a3bd9196a5af43fc4e60b009 \ - --hash=sha256:f705e12750171c0ab4ef2a3c76b9a4024a62c4103e3a55dd6f99265b9bc6fcfc \ - --hash=sha256:f881853d2643a29e643609da57b96d5f9c9b93f62429dcc1cbb413c7d07f0e1a \ - --hash=sha256:fe60131d21b31fd1a14bd43e6bb88256f69dfc3188b3a89d736d6c71ed43ec95 - # via aiodocker -async-timeout==3.0.1 \ - --hash=sha256:0c3c816a028d47f659d6ff5c745cb2acf1f966da1fe5c19c77a70282b25f4c5f \ - 
--hash=sha256:4291ca197d287d274d0b6cb5d6f8f8f82d434ed288f962539ff18cc9012f9ea3 - # via aiohttp -attrs==21.2.0 \ - --hash=sha256:149e90d6d8ac20db7a955ad60cf0e6881a3f20d37096140088356da6c716b0b1 \ - --hash=sha256:ef6aaac3ca6cd92904cdd0d83f629a15f18053ec84e6432106f7a4d04ae4f5fb - # via aiohttp -chardet==4.0.0 \ - --hash=sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa \ - --hash=sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5 - # via aiohttp -idna==3.2 \ - --hash=sha256:14475042e284991034cb48e06f6851428fb14c4dc953acd9be9a5e95c7b6dd7a \ - --hash=sha256:467fbad99067910785144ce333826c71fb0e63a425657295239737f7ecd125f3 - # via yarl -multidict==5.1.0 \ - --hash=sha256:018132dbd8688c7a69ad89c4a3f39ea2f9f33302ebe567a879da8f4ca73f0d0a \ - --hash=sha256:051012ccee979b2b06be928a6150d237aec75dd6bf2d1eeeb190baf2b05abc93 \ - --hash=sha256:05c20b68e512166fddba59a918773ba002fdd77800cad9f55b59790030bab632 \ - --hash=sha256:07b42215124aedecc6083f1ce6b7e5ec5b50047afa701f3442054373a6deb656 \ - --hash=sha256:0e3c84e6c67eba89c2dbcee08504ba8644ab4284863452450520dad8f1e89b79 \ - --hash=sha256:0e929169f9c090dae0646a011c8b058e5e5fb391466016b39d21745b48817fd7 \ - --hash=sha256:1ab820665e67373de5802acae069a6a05567ae234ddb129f31d290fc3d1aa56d \ - --hash=sha256:25b4e5f22d3a37ddf3effc0710ba692cfc792c2b9edfb9c05aefe823256e84d5 \ - --hash=sha256:2e68965192c4ea61fff1b81c14ff712fc7dc15d2bd120602e4a3494ea6584224 \ - --hash=sha256:2f1a132f1c88724674271d636e6b7351477c27722f2ed789f719f9e3545a3d26 \ - --hash=sha256:37e5438e1c78931df5d3c0c78ae049092877e5e9c02dd1ff5abb9cf27a5914ea \ - --hash=sha256:3a041b76d13706b7fff23b9fc83117c7b8fe8d5fe9e6be45eee72b9baa75f348 \ - --hash=sha256:3a4f32116f8f72ecf2a29dabfb27b23ab7cdc0ba807e8459e59a93a9be9506f6 \ - --hash=sha256:46c73e09ad374a6d876c599f2328161bcd95e280f84d2060cf57991dec5cfe76 \ - --hash=sha256:46dd362c2f045095c920162e9307de5ffd0a1bfbba0a6e990b344366f55a30c1 \ - 
--hash=sha256:4b186eb7d6ae7c06eb4392411189469e6a820da81447f46c0072a41c748ab73f \ - --hash=sha256:54fd1e83a184e19c598d5e70ba508196fd0bbdd676ce159feb412a4a6664f952 \ - --hash=sha256:585fd452dd7782130d112f7ddf3473ffdd521414674c33876187e101b588738a \ - --hash=sha256:5cf3443199b83ed9e955f511b5b241fd3ae004e3cb81c58ec10f4fe47c7dce37 \ - --hash=sha256:6a4d5ce640e37b0efcc8441caeea8f43a06addace2335bd11151bc02d2ee31f9 \ - --hash=sha256:7df80d07818b385f3129180369079bd6934cf70469f99daaebfac89dca288359 \ - --hash=sha256:806068d4f86cb06af37cd65821554f98240a19ce646d3cd24e1c33587f313eb8 \ - --hash=sha256:830f57206cc96ed0ccf68304141fec9481a096c4d2e2831f311bde1c404401da \ - --hash=sha256:929006d3c2d923788ba153ad0de8ed2e5ed39fdbe8e7be21e2f22ed06c6783d3 \ - --hash=sha256:9436dc58c123f07b230383083855593550c4d301d2532045a17ccf6eca505f6d \ - --hash=sha256:9dd6e9b1a913d096ac95d0399bd737e00f2af1e1594a787e00f7975778c8b2bf \ - --hash=sha256:ace010325c787c378afd7f7c1ac66b26313b3344628652eacd149bdd23c68841 \ - --hash=sha256:b47a43177a5e65b771b80db71e7be76c0ba23cc8aa73eeeb089ed5219cdbe27d \ - --hash=sha256:b797515be8743b771aa868f83563f789bbd4b236659ba52243b735d80b29ed93 \ - --hash=sha256:b7993704f1a4b204e71debe6095150d43b2ee6150fa4f44d6d966ec356a8d61f \ - --hash=sha256:d5c65bdf4484872c4af3150aeebe101ba560dcfb34488d9a8ff8dbcd21079647 \ - --hash=sha256:d81eddcb12d608cc08081fa88d046c78afb1bf8107e6feab5d43503fea74a635 \ - --hash=sha256:dc862056f76443a0db4509116c5cd480fe1b6a2d45512a653f9a855cc0517456 \ - --hash=sha256:ecc771ab628ea281517e24fd2c52e8f31c41e66652d07599ad8818abaad38cda \ - --hash=sha256:f200755768dc19c6f4e2b672421e0ebb3dd54c38d5a4f262b872d8cfcc9e93b5 \ - --hash=sha256:f21756997ad8ef815d8ef3d34edd98804ab5ea337feedcd62fb52d22bf531281 \ - --hash=sha256:fc13a9524bc18b6fb6e0dbec3533ba0496bbed167c56d0aabefd965584557d80 - # via - # aiohttp - # yarl -typing-extensions==3.10.0.0 \ - --hash=sha256:0ac0f89795dd19de6b97debb0c6af1c70987fd80a2d62d1958f7e56fcc31b497 \ - 
--hash=sha256:50b6f157849174217d0656f99dc82fe932884fb250826c18350e159ec6cdf342 \ - --hash=sha256:779383f6086d90c99ae41cf0ff39aac8a7937a9283ce0a414e5dd782f4c94a84 - # via - # aiodocker - # aiohttp -yarl==1.6.3 \ - --hash=sha256:00d7ad91b6583602eb9c1d085a2cf281ada267e9a197e8b7cae487dadbfa293e \ - --hash=sha256:0355a701b3998dcd832d0dc47cc5dedf3874f966ac7f870e0f3a6788d802d434 \ - --hash=sha256:15263c3b0b47968c1d90daa89f21fcc889bb4b1aac5555580d74565de6836366 \ - --hash=sha256:2ce4c621d21326a4a5500c25031e102af589edb50c09b321049e388b3934eec3 \ - --hash=sha256:31ede6e8c4329fb81c86706ba8f6bf661a924b53ba191b27aa5fcee5714d18ec \ - --hash=sha256:324ba3d3c6fee56e2e0b0d09bf5c73824b9f08234339d2b788af65e60040c959 \ - --hash=sha256:329412812ecfc94a57cd37c9d547579510a9e83c516bc069470db5f75684629e \ - --hash=sha256:4736eaee5626db8d9cda9eb5282028cc834e2aeb194e0d8b50217d707e98bb5c \ - --hash=sha256:4953fb0b4fdb7e08b2f3b3be80a00d28c5c8a2056bb066169de00e6501b986b6 \ - --hash=sha256:4c5bcfc3ed226bf6419f7a33982fb4b8ec2e45785a0561eb99274ebbf09fdd6a \ - --hash=sha256:547f7665ad50fa8563150ed079f8e805e63dd85def6674c97efd78eed6c224a6 \ - --hash=sha256:5b883e458058f8d6099e4420f0cc2567989032b5f34b271c0827de9f1079a424 \ - --hash=sha256:63f90b20ca654b3ecc7a8d62c03ffa46999595f0167d6450fa8383bab252987e \ - --hash=sha256:68dc568889b1c13f1e4745c96b931cc94fdd0defe92a72c2b8ce01091b22e35f \ - --hash=sha256:69ee97c71fee1f63d04c945f56d5d726483c4762845400a6795a3b75d56b6c50 \ - --hash=sha256:6d6283d8e0631b617edf0fd726353cb76630b83a089a40933043894e7f6721e2 \ - --hash=sha256:72a660bdd24497e3e84f5519e57a9ee9220b6f3ac4d45056961bf22838ce20cc \ - --hash=sha256:73494d5b71099ae8cb8754f1df131c11d433b387efab7b51849e7e1e851f07a4 \ - --hash=sha256:7356644cbed76119d0b6bd32ffba704d30d747e0c217109d7979a7bc36c4d970 \ - --hash=sha256:8a9066529240171b68893d60dca86a763eae2139dd42f42106b03cf4b426bf10 \ - --hash=sha256:8aa3decd5e0e852dc68335abf5478a518b41bf2ab2f330fe44916399efedfae0 \ - 
--hash=sha256:97b5bdc450d63c3ba30a127d018b866ea94e65655efaf889ebeabc20f7d12406 \ - --hash=sha256:9ede61b0854e267fd565e7527e2f2eb3ef8858b301319be0604177690e1a3896 \ - --hash=sha256:b2e9a456c121e26d13c29251f8267541bd75e6a1ccf9e859179701c36a078643 \ - --hash=sha256:b5dfc9a40c198334f4f3f55880ecf910adebdcb2a0b9a9c23c9345faa9185721 \ - --hash=sha256:bafb450deef6861815ed579c7a6113a879a6ef58aed4c3a4be54400ae8871478 \ - --hash=sha256:c49ff66d479d38ab863c50f7bb27dee97c6627c5fe60697de15529da9c3de724 \ - --hash=sha256:ce3beb46a72d9f2190f9e1027886bfc513702d748047b548b05dab7dfb584d2e \ - --hash=sha256:d26608cf178efb8faa5ff0f2d2e77c208f471c5a3709e577a7b3fd0445703ac8 \ - --hash=sha256:d597767fcd2c3dc49d6eea360c458b65643d1e4dbed91361cf5e36e53c1f8c96 \ - --hash=sha256:d5c32c82990e4ac4d8150fd7652b972216b204de4e83a122546dce571c1bdf25 \ - --hash=sha256:d8d07d102f17b68966e2de0e07bfd6e139c7c02ef06d3a0f8d2f0f055e13bb76 \ - --hash=sha256:e46fba844f4895b36f4c398c5af062a9808d1f26b2999c58909517384d5deda2 \ - --hash=sha256:e6b5460dc5ad42ad2b36cca524491dfcaffbfd9c8df50508bddc354e787b8dc2 \ - --hash=sha256:f040bcc6725c821a4c0665f3aa96a4d0805a7aaf2caf266d256b8ed71b9f041c \ - --hash=sha256:f0b059678fd549c66b89bed03efcabb009075bd131c248ecdf087bdb6faba24a \ - --hash=sha256:fcbb48a93e8699eae920f8d92f7160c03567b421bc17362a9ffbbd706a816f71 - # via aiohttp diff --git a/tools/docker/tests/test_utils.py b/tools/docker/tests/test_utils.py deleted file mode 100644 index dba3026160b27..0000000000000 --- a/tools/docker/tests/test_utils.py +++ /dev/null @@ -1,145 +0,0 @@ -from unittest.mock import AsyncMock, MagicMock - -import pytest - -from tools.docker import utils - - -class MockAsyncIterator: - def __init__(self, seq): - self.iter = iter(seq) - self.count = 0 - - def __aiter__(self): - return self - - async def __anext__(self): - self.count += 1 - try: - return next(self.iter) - except StopIteration: - raise StopAsyncIteration - - -@pytest.mark.asyncio -@pytest.mark.parametrize("args", [(), ("ARG1", ), 
("ARG1", "ARG2")]) -@pytest.mark.parametrize("kwargs", [{}, dict(kkey1="VVAR1", kkey2="VVAR2")]) -async def test_util_build_image(patches, args, kwargs): - patched = patches( - "_build_image", - "tempfile", - prefix="tools.docker.utils") - - with patched as (m_build, m_temp): - assert not await utils.build_image(*args, **kwargs) - - assert ( - list(m_temp.NamedTemporaryFile.call_args) - == [(), {}]) - - assert ( - list(m_build.call_args) - == [(m_temp.NamedTemporaryFile.return_value.__enter__.return_value, ) + args, - kwargs]) - - -@pytest.mark.asyncio -@pytest.mark.parametrize("stream", [True, False]) -@pytest.mark.parametrize("buildargs", [None, dict(key1="VAR1", key2="VAR2")]) -@pytest.mark.parametrize("error", [None, "SOMETHING WENT WRONG"]) -async def test_util__build_image(patches, stream, buildargs, error): - lines = ( - dict(notstream=f"NOTLINE{i}", - stream=f"LINE{i}") - for i in range(1, 4)) - - if error: - lines = list(lines) - lines[1]["errorDetail"] = dict(message=error) - lines = iter(lines) - - docker = AsyncMock() - docker.images.build = MagicMock(return_value=MockAsyncIterator(lines)) - - _stream = MagicMock() - tar = MagicMock() - patched = patches( - "tarfile", - prefix="tools.docker.utils") - - with patched as (m_tar, ): - args = (tar, docker, "CONTEXT", "TAG") - kwargs = {} - if stream: - kwargs["stream"] = _stream - if buildargs: - kwargs["buildargs"] = buildargs - - if error: - with pytest.raises(utils.BuildError) as e: - await utils._build_image(*args, **kwargs) - else: - assert not await utils._build_image(*args, **kwargs) - - assert ( - list(m_tar.open.call_args) - == [(tar.name,), {'fileobj': tar, 'mode': 'w'}]) - assert ( - list(m_tar.open.return_value.__enter__.return_value.add.call_args) - == [('CONTEXT',), {'arcname': '.'}]) - assert ( - list(tar.seek.call_args) - == [(0,), {}]) - assert ( - list(docker.images.build.call_args) - == [(), - {'fileobj': tar, - 'encoding': 'gzip', - 'tag': 'TAG', - 'stream': True, - 'buildargs': buildargs 
or {}}]) - if stream and error: - assert ( - list(list(c) for c in _stream.call_args_list) - == [[('LINE1',), {}]]) - return - elif stream: - assert ( - list(list(c) for c in _stream.call_args_list) - == [[(f'LINE{i}',), {}] for i in range(1, 4)]) - return - # the iterator should be called n + 1 for the n of items - # if there was an error it should stop at the error - assert docker.images.build.return_value.count == 2 if error else 4 - assert not _stream.called - - -@pytest.mark.asyncio -@pytest.mark.parametrize("raises", [True, False]) -@pytest.mark.parametrize("url", [None, "URL"]) -async def test_util_docker_client(patches, raises, url): - - class DummyError(Exception): - pass - - patched = patches( - "aiodocker", - prefix="tools.docker.utils") - - with patched as (m_docker, ): - m_docker.Docker.return_value.close = AsyncMock() - if raises: - with pytest.raises(DummyError): - async with utils.docker_client(url) as docker: - raise DummyError() - else: - async with utils.docker_client(url) as docker: - pass - - assert ( - list(m_docker.Docker.call_args) - == [(url,), {}]) - assert docker == m_docker.Docker.return_value - assert ( - list(m_docker.Docker.return_value.close.call_args) - == [(), {}]) diff --git a/tools/docker/utils.py b/tools/docker/utils.py deleted file mode 100644 index 93ca70592040b..0000000000000 --- a/tools/docker/utils.py +++ /dev/null @@ -1,102 +0,0 @@ -import tarfile -import tempfile -from contextlib import asynccontextmanager -from typing import Callable, Iterator, Optional - -import aiodocker - - -class BuildError(Exception): - pass - - -async def _build_image( - tar: tempfile.NamedTemporaryFile, - docker: aiodocker.Docker, - context: str, - tag: str, - buildargs: Optional[dict] = None, - stream: Optional[Callable] = None, - **kwargs) -> None: - """Docker image builder - - if a `stream` callable arg is supplied, logs are output there. - - raises `tools.docker.utils.BuildError` with any error output. 
- """ - # create a tarfile from the supplied directory - with tarfile.open(tar.name, fileobj=tar, mode="w") as tarball: - tarball.add(context, arcname=".") - tar.seek(0) - - # build the docker image - build = docker.images.build( - fileobj=tar, encoding="gzip", tag=tag, stream=True, buildargs=buildargs or {}, **kwargs) - - async for line in build: - if line.get("errorDetail"): - raise BuildError( - f"Docker image failed to build {tag} {buildargs}\n{line['errorDetail']['message']}") - if stream and "stream" in line: - stream(line["stream"].strip()) - - -async def build_image(*args, **kwargs) -> None: - """Creates a Docker context by tarballing a directory, and then building an image with it - - aiodocker doesn't provide an in-built way to build docker images from a directory, only - a file, so you can't include artefacts. - - this adds the ability to include artefacts. - - as an example, assuming you have a directory containing a `Dockerfile` and some artefacts at - `/tmp/mydockercontext` - and wanted to build the image `envoy:foo` you could: - - ```python - - import asyncio - - from tools.docker import utils - - - async def myimage(): - async with utils.docker_client() as docker: - await utils.build_image( - docker, - "/tmp/mydockerbuildcontext", - "envoy:foo", - buildargs={}) - - asyncio.run(myimage()) - ``` - """ - with tempfile.NamedTemporaryFile() as tar: - await _build_image(tar, *args, **kwargs) - - -@asynccontextmanager -async def docker_client(url: Optional[str] = "") -> Iterator[aiodocker.Docker]: - """Aiodocker client - - For example to dump the docker image data: - - ```python - - import asyncio - - from tools.docker import utils - - - async def docker_images(): - async with utils.docker_client() as docker: - print(await docker.images.list()) - - asyncio.run(docker_images()) - ``` - """ - - docker = aiodocker.Docker(url) - try: - yield docker - finally: - await docker.close() diff --git a/tools/docs/BUILD b/tools/docs/BUILD index 
39049acc40cfa..56892499d24d5 100644 --- a/tools/docs/BUILD +++ b/tools/docs/BUILD @@ -10,7 +10,6 @@ envoy_package() py_binary( name = "generate_extensions_security_rst", srcs = ["generate_extensions_security_rst.py"], - data = ["//source/extensions:extensions_metadata.yaml"], deps = [ "//tools/base:utils", ], diff --git a/tools/docs/generate_api_rst.py b/tools/docs/generate_api_rst.py index 2076b50d44277..e5539332de1fb 100644 --- a/tools/docs/generate_api_rst.py +++ b/tools/docs/generate_api_rst.py @@ -4,6 +4,24 @@ import tarfile +def include_package(envoy_api_protos, rst_file_path, prefix): + # `envoy_api_rst_files` is a list of file paths for .proto.rst files + # generated by protodoc + # + # we are only interested in the proto files generated for envoy protos, + # not for non-envoy dependencies + if ("pkg/" + prefix) not in rst_file_path: + return None + # derive the "canonical" path from the filepath + canonical = f"{rst_file_path.split('pkg/' + prefix)[1]}" + + # we are only interested in the actual v3 protos, not their dependencies + if (prefix + canonical) not in envoy_api_protos: + return None + + return canonical + + def main(): proto_srcs = sys.argv[1] envoy_api_rst_files = sys.argv[1:-1] @@ -24,19 +42,12 @@ def main(): ] for rst_file_path in envoy_api_rst_files: - # `envoy_api_rst_files` is a list of file paths for .proto.rst files - # generated by protodoc - # - # we are only interested in the proto files generated for envoy protos, - # not for non-envoy dependencies - if "pkg/envoy" not in rst_file_path: + canonical = include_package(envoy_api_protos, rst_file_path, "envoy/") + if canonical is None: + canonical = include_package(envoy_api_protos, rst_file_path, "contrib/envoy/") + if canonical is None: continue - # derive the "canonical" path from the filepath - canonical = f"{rst_file_path.split('pkg/envoy/')[1]}" - # we are only interested in the actual v3 protos, not their dependencies - if f"envoy/{canonical}" not in envoy_api_protos: - continue 
target = os.path.join("rst-out/api-v3", canonical) if not os.path.exists(os.path.dirname(target)): os.makedirs(os.path.dirname(target)) diff --git a/tools/docs/generate_extensions_security_rst.py b/tools/docs/generate_extensions_security_rst.py index 2dc6d8e84ee29..180e2eb247f69 100644 --- a/tools/docs/generate_extensions_security_rst.py +++ b/tools/docs/generate_extensions_security_rst.py @@ -18,16 +18,24 @@ def format_item(extension, metadata): item = '* :ref:`%s `' % (extension, extension) if metadata.get('status') == 'alpha': item += ' (alpha)' + if metadata.get('contrib', False): + item += ' (:ref:`contrib builds ` only)' return item def main(): metadata_filepath = sys.argv[1] - output_filename = sys.argv[2] + contrib_metadata_filepath = sys.argv[2] + output_filename = sys.argv[3] generated_rst_dir = os.path.dirname(output_filename) security_rst_root = os.path.join(generated_rst_dir, "intro/arch_overview/security") extension_db = utils.from_yaml(metadata_filepath) + contrib_extension_db = utils.from_yaml(contrib_metadata_filepath) + for contrib_extension in contrib_extension_db.keys(): + contrib_extension_db[contrib_extension]['contrib'] = True + extension_db.update(contrib_extension_db) + pathlib.Path(security_rst_root).mkdir(parents=True, exist_ok=True) security_postures = defaultdict(list) diff --git a/tools/docs/requirements.txt b/tools/docs/requirements.txt index 5b0251375c9db..075bb65491822 100644 --- a/tools/docs/requirements.txt +++ b/tools/docs/requirements.txt @@ -25,9 +25,11 @@ certifi==2021.5.30 \ chardet==4.0.0 \ --hash=sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa \ --hash=sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5 - # via - # -r tools/docs/requirements.txt - # requests + # via -r tools/docs/requirements.txt +charset-normalizer==2.0.4 \ + --hash=sha256:0c8911edd15d19223366a194a513099a302055a962bca2cec0f54b8b63175d8b \ + 
--hash=sha256:f23667ebe1084be45f6ae0538e4a5a865206544097e4e8bbcacf42cd02a348f3 + # via requests colorama==0.4.4 \ --hash=sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b \ --hash=sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2 @@ -47,8 +49,8 @@ gitdb==4.0.7 \ # -r tools/docs/requirements.txt # gitpython gitpython==3.1.18 \ - --hash=sha256:fce760879cd2aebd2991b3542876dc5c4a909b30c9d69dfc488e504a8db37ee8 \ - --hash=sha256:b838a895977b45ab6f0cc926a9045c8d1c44e2b653c1fcc39fe91f42c6e8f05b + --hash=sha256:b838a895977b45ab6f0cc926a9045c8d1c44e2b653c1fcc39fe91f42c6e8f05b \ + --hash=sha256:fce760879cd2aebd2991b3542876dc5c4a909b30c9d69dfc488e504a8db37ee8 # via -r tools/docs/requirements.txt idna==2.10 \ --hash=sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6 \ @@ -107,14 +109,14 @@ markupsafe==2.0.1 \ # -r tools/docs/requirements.txt # jinja2 packaging==21.0 \ - --hash=sha256:c86254f9220d55e31cc94d69bade760f0847da8000def4dfe1c6b872fd14ff14 \ - --hash=sha256:7dc96269f53a4ccec5c0670940a4281106dd0bb343f47b7471f779df49c2fbe7 + --hash=sha256:7dc96269f53a4ccec5c0670940a4281106dd0bb343f47b7471f779df49c2fbe7 \ + --hash=sha256:c86254f9220d55e31cc94d69bade760f0847da8000def4dfe1c6b872fd14ff14 # via # -r tools/docs/requirements.txt # sphinx -pygments==2.9.0 \ - --hash=sha256:a18f47b506a429f6f4b9df81bb02beab9ca21d0a5fee38ed15aef65f0545519f \ - --hash=sha256:d66e804411278594d764fc69ec36ec13d9ae9147193a1740cd34d272ca383b8e +pygments==2.10.0 \ + --hash=sha256:b8e67fe6af78f492b3c4b3e2970c0624cbf08beb1e493b2c99b9fa1b67a20380 \ + --hash=sha256:f398865f7eb6874156579fdf36bc840a03cab64d1cde9e93d68f46a425ec52c6 # via # -r tools/docs/requirements.txt # sphinx @@ -131,9 +133,9 @@ pytz==2021.1 \ # via # -r tools/docs/requirements.txt # babel -requests==2.25.1 \ - --hash=sha256:27973dd4a904a4f13b263a19c866c13b92a39ed1c964655f025f3f8d3d75b804 \ - --hash=sha256:c210084e36a42ae6b9219e00e48287def368a26d03a048ddad7bfee44f75871e 
+requests==2.26.0 \ + --hash=sha256:6c1246513ecd5ecd4528a0906f910e8f0f9c6b8ec72030dc9fd154dc1a6efd24 \ + --hash=sha256:b8aa58f8cf793ffd8782d3d8cb19e66ef36f7aba4353eec859e74678b01b07a7 # via # -r tools/docs/requirements.txt # sphinx @@ -163,13 +165,13 @@ sphinx-rtd-theme==0.5.2 \ --hash=sha256:32bd3b5d13dc8186d7a42fc816a23d32e83a4827d7d9882948e7b837c232da5a \ --hash=sha256:4a05bdbe8b1446d77a01e20a23ebc6777c74f43237035e76be89699308987d6f # via -r tools/docs/requirements.txt -sphinx-tabs==3.1.0 \ - --hash=sha256:63df94e84bc05eb8598419a313ffc24455a14d1a580d174bb748404063958a67 \ - --hash=sha256:5eee2a869b1226e1f618f0c7ed267e5e3c24425565e6313cad80d00a7119694f +sphinx-tabs==3.2.0 \ + --hash=sha256:1e1b1846c80137bd81a78e4a69b02664b98b1e1da361beb30600b939dfc75065 \ + --hash=sha256:33137914ed9b276e6a686d7a337310ee77b1dae316fdcbce60476913a152e0a4 # via -r tools/docs/requirements.txt sphinx==4.1.1 \ - --hash=sha256:3d513088236eef51e5b0adb78b0492eb22cc3b8ccdb0b36dd021173b365d4454 \ - --hash=sha256:23c846a1841af998cb736218539bb86d16f5eb95f5760b1966abcd2d584e62b8 + --hash=sha256:23c846a1841af998cb736218539bb86d16f5eb95f5760b1966abcd2d584e62b8 \ + --hash=sha256:3d513088236eef51e5b0adb78b0492eb22cc3b8ccdb0b36dd021173b365d4454 # via # -r tools/docs/requirements.txt # sphinx-copybutton @@ -232,4 +234,6 @@ urllib3==1.26.6 \ setuptools==57.0.0 \ --hash=sha256:401cbf33a7bf817d08014d51560fc003b895c4cdc1a5b521ad2969e928a07535 \ --hash=sha256:c8b9f1a457949002e358fea7d3f2a1e1b94ddc0354b2e40afc066bf95d21bf7b - # via sphinx + # via + # -r tools/docs/requirements.txt + # sphinx diff --git a/tools/docs/rst_check.py b/tools/docs/rst_check.py index 47bad1aa123a6..8a5b692d036d3 100644 --- a/tools/docs/rst_check.py +++ b/tools/docs/rst_check.py @@ -1,19 +1,30 @@ +import pathlib import re import sys -from typing import Iterator +from functools import cached_property +from typing import Iterator, List, Pattern from tools.base import checker -INVALID_REFLINK = re.compile(r".* ref:.*") 
-REF_WITH_PUNCTUATION_REGEX = re.compile(r".*\. <[^<]*>`\s*") -RELOADABLE_FLAG_REGEX = re.compile(r".*(...)(envoy.reloadable_features.[^ ]*)\s.*") -VERSION_HISTORY_NEW_LINE_REGEX = re.compile(r"\* ([a-z \-_]+): ([a-z:`]+)") -VERSION_HISTORY_SECTION_NAME = re.compile(r"^[A-Z][A-Za-z ]*$") +INVALID_REFLINK = r".* ref:.*" +REF_WITH_PUNCTUATION_REGEX = r".*\. <[^<]*>`\s*" +VERSION_HISTORY_NEW_LINE_REGEX = r"\* ([a-z \-_]+): ([a-z:`]+)" +VERSION_HISTORY_SECTION_NAME = r"^[A-Z][A-Za-z ]*$" +# Make sure backticks come in pairs. +# Exceptions: reflinks (ref:`` where the backtick won't be preceded by a space +# links `title `_ where the _ is checked for in the regex. +BAD_TICKS_REGEX = re.compile(r".* `[^`].*`[^_]") +# TODO(phlax): +# - generalize these checks to all rst files +# - improve checks/handling of "default role"/inline literals +# (perhaps using a sphinx plugin) +# - add rstcheck and/or rstlint -class CurrentVersionFile(object): - def __init__(self, path): +class CurrentVersionFile: + + def __init__(self, path: pathlib.Path): self._path = path @property @@ -22,8 +33,20 @@ def lines(self) -> Iterator[str]: for line in f.readlines(): yield line.strip() + @cached_property + def backticks_re(self) -> Pattern[str]: + return re.compile(BAD_TICKS_REGEX) + + @cached_property + def invalid_reflink_re(self) -> Pattern[str]: + return re.compile(INVALID_REFLINK) + + @cached_property + def new_line_re(self) -> Pattern[str]: + return re.compile(VERSION_HISTORY_NEW_LINE_REGEX) + @property - def path(self) -> str: + def path(self) -> pathlib.Path: return self._path @property @@ -33,18 +56,18 @@ def prior_endswith_period(self) -> bool: # Don't punctuation-check empty lines. or not self.prior_line # The text in the :ref ends with a . 
- or - (self.prior_line.endswith('`') and REF_WITH_PUNCTUATION_REGEX.match(self.prior_line))) - - def check_flags(self, line: str) -> list: - # TODO(phlax): improve checking of inline literals - # make sure flags are surrounded by ``s (ie "inline literal") - flag_match = RELOADABLE_FLAG_REGEX.match(line) - return ([f"Flag {flag_match.groups()[1]} should be enclosed in double back ticks"] - if flag_match and not flag_match.groups()[0].startswith(' ``') else []) - - def check_line(self, line: str) -> list: - errors = self.check_reflink(line) + self.check_flags(line) + or (self.prior_line.endswith('`') and self.punctuation_re.match(self.prior_line))) + + @cached_property + def punctuation_re(self) -> Pattern[str]: + return re.compile(REF_WITH_PUNCTUATION_REGEX) + + @cached_property + def section_name_re(self) -> Pattern[str]: + return re.compile(VERSION_HISTORY_SECTION_NAME) + + def check_line(self, line: str) -> List[str]: + errors = self.check_reflink(line) + self.check_ticks(line) if line.startswith("* "): errors += self.check_list_item(line) elif not line: @@ -55,16 +78,16 @@ def check_line(self, line: str) -> list: self.prior_line += line return errors - def check_list_item(self, line: str) -> list: + def check_list_item(self, line: str) -> List[str]: errors = [] if not self.prior_endswith_period: errors.append(f"The following release note does not end with a '.'\n {self.prior_line}") - match = VERSION_HISTORY_NEW_LINE_REGEX.match(line) + match = self.new_line_re.match(line) if not match: return errors + [ "Version history line malformed. " - f"Does not match VERSION_HISTORY_NEW_LINE_REGEX in docs_check.py\n {line}\n" + f"Does not match VERSION_HISTORY_NEW_LINE_REGEX\n {line}\n" "Please use messages in the form 'category: feature explanation.', " "starting with a lower-cased letter and ending with a period." 
] @@ -86,19 +109,22 @@ def check_list_item(self, line: str) -> list: self.set_tokens(line, first_word, next_word) return errors - def check_previous_period(self) -> list: + def check_previous_period(self) -> List[str]: return ([f"The following release note does not end with a '.'\n {self.prior_line}"] if not self.prior_endswith_period else []) - def check_reflink(self, line: str) -> list: - # TODO(phlax): Check reflinks for all rst files + def check_reflink(self, line: str) -> List[str]: return ([f"Found text \" ref:\". This should probably be \" :ref:\"\n{line}"] - if INVALID_REFLINK.match(line) else []) + if self.invalid_reflink_re.match(line) else []) + + def check_ticks(self, line: str) -> List[str]: + return ([f"Backticks should come in pairs (except for links and refs): {line}"] if + (self.backticks_re.match(line)) else []) def run_checks(self) -> Iterator[str]: self.set_tokens() for line_number, line in enumerate(self.lines): - if VERSION_HISTORY_SECTION_NAME.match(line): + if self.section_name_re.match(line): if line == "Deprecated": break self.set_tokens() @@ -114,13 +140,14 @@ def set_tokens(self, line: str = "", first_word: str = "", next_word: str = "") class RSTChecker(checker.Checker): checks = ("current_version",) - def check_current_version(self): - errors = list(CurrentVersionFile("docs/root/version_history/current.rst").run_checks()) + def check_current_version(self) -> None: + errors = list( + CurrentVersionFile(pathlib.Path("docs/root/version_history/current.rst")).run_checks()) if errors: self.error("current_version", errors) -def main(*args) -> int: +def main(*args: str) -> int: return RSTChecker(*args).run() diff --git a/tools/docs/sphinx_runner.py b/tools/docs/sphinx_runner.py index 2550bd7c57299..53b330c16d2b9 100644 --- a/tools/docs/sphinx_runner.py +++ b/tools/docs/sphinx_runner.py @@ -1,15 +1,16 @@ import argparse import os +import pathlib import platform import re import sys import tarfile -import tempfile from functools import 
cached_property +from typing import Tuple -from colorama import Fore, Style +from colorama import Fore, Style # type:ignore -from sphinx.cmd.build import main as sphinx_build +from sphinx.cmd.build import main as sphinx_build # type:ignore from tools.base import runner, utils @@ -32,9 +33,9 @@ def blob_sha(self) -> str: return self.docs_tag or self.build_sha @property - def build_dir(self) -> str: + def build_dir(self) -> pathlib.Path: """Returns current build_dir - most likely a temp directory""" - return self._build_dir + return pathlib.Path(self.tempdir.name) @property def build_sha(self) -> str: @@ -47,17 +48,17 @@ def colors(self) -> dict: return dict(chrome=Fore.LIGHTYELLOW_EX, key=Fore.LIGHTCYAN_EX, value=Fore.LIGHTMAGENTA_EX) @cached_property - def config_file(self) -> str: + def config_file(self) -> pathlib.Path: """Populates a config file with self.configs and returns the file path""" return utils.to_yaml(self.configs, self.config_file_path) @property - def config_file_path(self) -> str: + def config_file_path(self) -> pathlib.Path: """Path to a (temporary) build config""" - return os.path.join(self.build_dir, "build.yaml") + return self.build_dir.joinpath("build.yaml") @cached_property - def configs(self) -> str: + def configs(self) -> dict: """Build configs derived from provided args""" _configs = dict( version_string=self.version_string, @@ -66,15 +67,15 @@ def configs(self) -> str: version_number=self.version_number, docker_image_tag_name=self.docker_image_tag_name) if self.validator_path: - _configs["validator_path"] = self.validator_path + _configs["validator_path"] = str(self.validator_path) if self.descriptor_path: - _configs["descriptor_path"] = self.descriptor_path + _configs["descriptor_path"] = str(self.descriptor_path) return _configs @property - def descriptor_path(self) -> str: + def descriptor_path(self) -> pathlib.Path: """Path to a descriptor file for config validation""" - return os.path.abspath(self.args.descriptor_path) + return 
pathlib.Path(self.args.descriptor_path) @property def docker_image_tag_name(self) -> str: @@ -87,14 +88,14 @@ def docs_tag(self) -> str: return self.args.docs_tag @cached_property - def html_dir(self) -> str: + def html_dir(self) -> pathlib.Path: """Path to (temporary) directory for outputting html""" - return os.path.join(self.build_dir, "generated/html") + return self.build_dir.joinpath("generated", "html") @property - def output_filename(self) -> str: + def output_filename(self) -> pathlib.Path: """Path to tar file for saving generated html docs""" - return self.args.output_filename + return pathlib.Path(self.args.output_filename) @property def py_compatible(self) -> bool: @@ -107,41 +108,40 @@ def release_level(self) -> str: return "tagged" if self.docs_tag else "pre-release" @cached_property - def rst_dir(self) -> str: + def rst_dir(self) -> pathlib.Path: """Populates an rst directory with contents of given rst tar, and returns the path to the directory """ - rst_dir = os.path.join(self.build_dir, "generated/rst") + rst_dir = self.build_dir.joinpath("generated", "rst") if self.rst_tar: - with tarfile.open(self.rst_tar) as tarfiles: - tarfiles.extractall(path=rst_dir) + utils.extract(rst_dir, self.rst_tar) return rst_dir @property - def rst_tar(self) -> str: + def rst_tar(self) -> pathlib.Path: """Path to the rst tarball""" - return self.args.rst_tar + return pathlib.Path(self.args.rst_tar) @property - def sphinx_args(self) -> list: + def sphinx_args(self) -> Tuple[str, ...]: """Command args for sphinx""" - return ["-W", "--keep-going", "--color", "-b", "html", self.rst_dir, self.html_dir] + return ( + "-W", "--keep-going", "--color", "-b", "html", str(self.rst_dir), str(self.html_dir)) @property - def validator_path(self) -> str: + def validator_path(self) -> pathlib.Path: """Path to validator utility for validating snippets""" - return os.path.abspath(self.args.validator_path) + return pathlib.Path(self.args.validator_path) @property - def version_file(self) 
-> str: + def version_file(self) -> pathlib.Path: """Path to version files for deriving docs version""" - return self.args.version_file + return pathlib.Path(self.args.version_file) @cached_property def version_number(self) -> str: """Semantic version""" - with open(self.version_file) as f: - return f.read().strip() + return self.version_file.read_text().strip() @property def version_string(self) -> str: @@ -151,6 +151,7 @@ def version_string(self) -> str: if self.docs_tag else f"{self.version_number}-{self.build_sha[:6]}") def add_arguments(self, parser: argparse.ArgumentParser) -> None: + super().add_arguments(parser) parser.add_argument("--build_sha") parser.add_argument("--docs_tag") parser.add_argument("--version_file") @@ -183,25 +184,19 @@ def check_env(self) -> None: raise SphinxEnvError( "Given git tag does not match the VERSION file content:" f"{self.docs_tag} vs v{self.version_number}") - with open(os.path.join(self.rst_dir, "version_history/current.rst")) as f: - if not self.version_number in f.read(): - raise SphinxEnvError( - f"Git tag ({self.version_number}) not found in version_history/current.rst") + # this should probs only check the first line + version_current = self.rst_dir.joinpath("version_history", "current.rst").read_text() + if not self.version_number in version_current: + raise SphinxEnvError( + f"Git tag ({self.version_number}) not found in version_history/current.rst") def create_tarball(self) -> None: with tarfile.open(self.output_filename, "w") as tar: tar.add(self.html_dir, arcname=".") - def run(self) -> int: - with tempfile.TemporaryDirectory() as build_dir: - return self._run(build_dir) - - def _color(self, msg, name=None): - return f"{self.colors[name or 'chrome']}{msg}{Style.RESET_ALL}" - - def _run(self, build_dir): - self._build_dir = build_dir - os.environ["ENVOY_DOCS_BUILD_CONFIG"] = self.config_file + @runner.cleansup + def run(self): + os.environ["ENVOY_DOCS_BUILD_CONFIG"] = str(self.config_file) try: self.check_env() 
except SphinxEnvError as e: @@ -215,6 +210,9 @@ def _run(self, build_dir): return 1 self.create_tarball() + def _color(self, msg, name=None): + return f"{self.colors[name or 'chrome']}{msg}{Style.RESET_ALL}" + def main(*args) -> int: return SphinxRunner(*args).run() diff --git a/tools/docs/tests/test_rst_check.py b/tools/docs/tests/test_rst_check.py index 19678db367817..a2bef68a71f0b 100644 --- a/tools/docs/tests/test_rst_check.py +++ b/tools/docs/tests/test_rst_check.py @@ -13,6 +13,29 @@ def test_rst_check_current_version_constructor(): assert version_file.path == "PATH" +@pytest.mark.parametrize( + "constant", + (("backticks_re", "BAD_TICKS_REGEX"), + ("invalid_reflink_re", "INVALID_REFLINK"), + ("new_line_re", "VERSION_HISTORY_NEW_LINE_REGEX"), + ("punctuation_re", "REF_WITH_PUNCTUATION_REGEX"), + ("section_name_re", "VERSION_HISTORY_SECTION_NAME"))) +def test_rst_check_current_version_regexes(patches, constant): + version_file = rst_check.CurrentVersionFile("PATH") + prop, constant = constant + patched = patches( + "re", + prefix="tools.docs.rst_check") + + with patched as (m_re, ): + assert getattr(version_file, prop) == m_re.compile.return_value + + assert ( + list(m_re.compile.call_args) + == [(getattr(rst_check, constant),), {}]) + assert prop in version_file.__dict__ + + def test_rst_check_current_version_lines(patches): version_file = rst_check.CurrentVersionFile("PATH") patched = patches( @@ -50,47 +73,6 @@ def test_rst_check_current_version_prior_ends_with_period(prior): assert version_file.prior_endswith_period == expected -@pytest.mark.parametrize("matches", [True, False, "partial"]) -def test_rst_check_current_version_check_flags(patches, matches): - version_file = rst_check.CurrentVersionFile("PATH") - patched = patches( - "RELOADABLE_FLAG_REGEX", - prefix="tools.docs.rst_check") - - with patched as (m_flag, ): - if matches == "partial": - m_flag.match.return_value.groups.return_value.__getitem__.return_value.startswith.return_value = False - elif 
not matches: - m_flag.match.return_value = False - result = version_file.check_flags("LINE") - - assert ( - list(m_flag.match.call_args) - == [('LINE',), {}]) - - if matches: - assert ( - list(m_flag.match.return_value.groups.call_args) - == [(), {}]) - assert ( - list(m_flag.match.return_value.groups.return_value.__getitem__.return_value.startswith.call_args) - == [(' ``',), {}]) - if matches == "partial": - assert ( - result - == [f"Flag {m_flag.match.return_value.groups.return_value.__getitem__.return_value} should be enclosed in double back ticks"]) - assert ( - list(list(c) for c in m_flag.match.return_value.groups.return_value.__getitem__.call_args_list) - == [[(0,), {}], [(1,), {}]]) - else: - assert ( - list(list(c) for c in m_flag.match.return_value.groups.return_value.__getitem__.call_args_list) - == [[(0,), {}]]) - assert result == [] - else: - assert result == [] - - @pytest.mark.parametrize("line", ["", " ", "* ", "*asdf"]) @pytest.mark.parametrize("prior_period", [True, False]) @pytest.mark.parametrize("prior_line", ["", "line_content"]) @@ -98,25 +80,26 @@ def test_rst_check_current_version_check_line(patches, line, prior_period, prior version_file = rst_check.CurrentVersionFile("PATH") patched = patches( "CurrentVersionFile.check_reflink", - "CurrentVersionFile.check_flags", "CurrentVersionFile.check_list_item", "CurrentVersionFile.check_previous_period", + "CurrentVersionFile.check_ticks", prefix="tools.docs.rst_check") version_file.prior_line = prior_line - with patched as (m_ref, m_flags, m_item, m_period): + with patched as (m_ref, m_item, m_period, m_ticks): result = version_file.check_line(line) expected = m_ref.return_value.__add__.return_value + assert ( list(m_ref.call_args) == [(line,), {}]) assert ( - list(m_flags.call_args) + list(m_ticks.call_args) == [(line,), {}]) assert ( list(m_ref.return_value.__add__.call_args) - == [(m_flags.return_value,), {}]) + == [(m_ticks.return_value,), {}]) if line.startswith("* "): assert ( @@ -159,9 
+142,9 @@ def test_rst_check_current_version_check_line(patches, line, prior_period, prior def test_rst_check_current_version_check_list_item(patches, matches, prior, prior_first, prior_next, first_word, next_word): version_file = rst_check.CurrentVersionFile("PATH") patched = patches( - "VERSION_HISTORY_NEW_LINE_REGEX", "CurrentVersionFile.set_tokens", ("CurrentVersionFile.prior_endswith_period", dict(new_callable=PropertyMock)), + ("CurrentVersionFile.new_line_re", dict(new_callable=PropertyMock)), prefix="tools.docs.rst_check") version_file.prior_line = "PRIOR LINE" version_file.first_word_of_prior_line = prior_first @@ -172,11 +155,11 @@ def _get_item(item): return first_word return next_word - with patched as (m_regex, m_tokens, m_prior): + with patched as (m_tokens, m_prior, m_regex): if not matches: - m_regex.match.return_value = False + m_regex.return_value.match.return_value = False else: - m_regex.match.return_value.groups.return_value.__getitem__.side_effect = _get_item + m_regex.return_value.match.return_value.groups.return_value.__getitem__.side_effect = _get_item m_prior.return_value = prior result = version_file.check_list_item("LINE") @@ -185,13 +168,13 @@ def _get_item(item): expected += ["The following release note does not end with a '.'\n PRIOR LINE"] assert ( - list(m_regex.match.call_args) + list(m_regex.return_value.match.call_args) == [('LINE',), {}]) if not matches: expected += [ f"Version history line malformed. 
" - f"Does not match VERSION_HISTORY_NEW_LINE_REGEX in docs_check.py\n LINE\n" + f"Does not match VERSION_HISTORY_NEW_LINE_REGEX\n LINE\n" "Please use messages in the form 'category: feature explanation.', " "starting with a lower-cased letter and ending with a period."] assert result == expected @@ -199,7 +182,7 @@ def _get_item(item): return assert ( - list(list(c) for c in m_regex.match.return_value.groups.call_args_list) + list(list(c) for c in m_regex.return_value.match.return_value.groups.call_args_list) == [[(), {}], [(), {}]]) if prior_first and prior_first > first_word: @@ -236,15 +219,15 @@ def test_rst_check_current_version_check_previous_period(patches, prior): def test_rst_check_current_version_check_reflink(patches, matches): version_file = rst_check.CurrentVersionFile("PATH") patched = patches( - "INVALID_REFLINK", + ("CurrentVersionFile.invalid_reflink_re", dict(new_callable=PropertyMock)), prefix="tools.docs.rst_check") with patched as (m_reflink, ): - m_reflink.match.return_value = matches + m_reflink.return_value.match.return_value = matches result = version_file.check_reflink("LINE") assert ( - list(m_reflink.match.call_args) + list(m_reflink.return_value.match.call_args) == [('LINE',), {}]) if matches: @@ -255,6 +238,24 @@ def test_rst_check_current_version_check_reflink(patches, matches): assert result == [] +@pytest.mark.parametrize("matches", [True, False]) +def test_rst_check_current_version_check_ticks(patches, matches): + version_file = rst_check.CurrentVersionFile("PATH") + patched = patches( + ("CurrentVersionFile.backticks_re", dict(new_callable=PropertyMock)), + prefix="tools.docs.rst_check") + + with patched as (m_re, ): + m_re.return_value.match.return_value = matches + assert ( + version_file.check_ticks("LINE") + == (["Backticks should come in pairs (except for links and refs): LINE"] + if matches else [])) + assert ( + list(m_re.return_value.match.call_args) + == [('LINE',), {}]) + + @pytest.mark.parametrize( "lines", [[], @@ 
-267,16 +268,16 @@ def test_rst_check_current_version_run_checks(patches, lines, errors, matches): version_file = rst_check.CurrentVersionFile("PATH") patched = patches( "enumerate", - "VERSION_HISTORY_SECTION_NAME", "CurrentVersionFile.set_tokens", "CurrentVersionFile.check_line", ("CurrentVersionFile.lines", dict(new_callable=PropertyMock)), + ("CurrentVersionFile.section_name_re", dict(new_callable=PropertyMock)), prefix="tools.docs.rst_check") - with patched as (m_enum, m_section, m_tokens, m_check, m_lines): + with patched as (m_enum, m_tokens, m_check, m_lines, m_section): m_enum.return_value = lines m_check.return_value = errors - m_section.match.return_value = matches + m_section.return_value.match.return_value = matches _result = version_file.run_checks() assert isinstance(_result, types.GeneratorType) result = list(_result) @@ -287,7 +288,7 @@ def test_rst_check_current_version_run_checks(patches, lines, errors, matches): if not lines: assert result == [] - assert not m_section.match.called + assert not m_section.return_value.match.called assert not m_check.called assert ( list(list(c) for c in m_tokens.call_args_list) @@ -309,7 +310,7 @@ def test_rst_check_current_version_run_checks(patches, lines, errors, matches): for error in errors: _errors.append((line_number, error)) assert ( - list(list(c) for c in m_section.match.call_args_list) + list(list(c) for c in m_section.return_value.match.call_args_list) == [[(line,), {}] for line in _match]) assert ( list(list(c) for c in m_tokens.call_args_list) @@ -342,19 +343,22 @@ def test_rst_checker_constructor(): @pytest.mark.parametrize("errors", [[], ["err1", "err2"]]) def test_rst_checker_check_current_version(patches, errors): checker = rst_check.RSTChecker("path1", "path2", "path3") - patched = patches( + "pathlib", "CurrentVersionFile", "RSTChecker.error", prefix="tools.docs.rst_check") - with patched as (m_version, m_error): + with patched as (m_plib, m_version, m_error): 
m_version.return_value.run_checks.return_value = errors checker.check_current_version() assert ( - list(m_version.call_args) + list(m_plib.Path.call_args) == [('docs/root/version_history/current.rst',), {}]) + assert ( + list(m_version.call_args) + == [(m_plib.Path.return_value,), {}]) assert ( list(m_version.return_value.run_checks.call_args) == [(), {}]) diff --git a/tools/docs/tests/test_sphinx_runner.py b/tools/docs/tests/test_sphinx_runner.py index f85ddd4004342..ad6a3fe1f06c8 100644 --- a/tools/docs/tests/test_sphinx_runner.py +++ b/tools/docs/tests/test_sphinx_runner.py @@ -7,9 +7,6 @@ def test_sphinx_runner_constructor(): runner = sphinx_runner.SphinxRunner() - assert runner.build_dir == "." - runner._build_dir = "foo" - assert runner.build_dir == "foo" assert runner._build_sha == "UNKNOWN" assert "blob_dir" not in runner.__dict__ @@ -31,6 +28,22 @@ def test_sphinx_runner_blob_sha(patches, docs_tag): assert "blob_sha" not in runner.__dict__ +def test_sphinx_runner_build_dir(patches): + runner = sphinx_runner.SphinxRunner() + patched = patches( + "pathlib", + ("SphinxRunner.tempdir", dict(new_callable=PropertyMock)), + prefix="tools.docs.sphinx_runner") + + with patched as (m_plib, m_temp): + assert runner.build_dir == m_plib.Path.return_value + + assert ( + list(m_plib.Path.call_args) + == [(m_temp.return_value.name, ), {}]) + assert "build_dir" not in runner.__dict__ + + @pytest.mark.parametrize("build_sha", [None, "", "SOME_BUILD_SHA"]) def test_sphinx_runner_build_sha(patches, build_sha): runner = sphinx_runner.SphinxRunner() @@ -87,16 +100,15 @@ def test_sphinx_runner_config_file(patches): def test_sphinx_runner_config_file_path(patches): runner = sphinx_runner.SphinxRunner() patched = patches( - "os.path", ("SphinxRunner.build_dir", dict(new_callable=PropertyMock)), prefix="tools.docs.sphinx_runner") - with patched as (m_path, m_build): - assert runner.config_file_path == m_path.join.return_value + with patched as (m_build, ): + assert 
runner.config_file_path == m_build.return_value.joinpath.return_value assert ( - list(m_path.join.call_args) - == [(m_build.return_value, 'build.yaml',), {}]) + list(m_build.return_value.joinpath.call_args) + == [('build.yaml',), {}]) assert "config_file_path" not in runner.__dict__ @@ -120,7 +132,10 @@ def test_sphinx_runner_configs(patches): _configs = {} for k, v in mapping.items(): - _configs[k] = _mocks[list(mapping.values()).index(v)] + _v = _mocks[list(mapping.values()).index(v)] + if k in ["validator_path", "descriptor_path"]: + _v = str(_v) + _configs[k] = _v assert result == _configs assert "configs" in runner.__dict__ @@ -128,17 +143,17 @@ def test_sphinx_runner_configs(patches): def test_sphinx_runner_descriptor_path(patches): runner = sphinx_runner.SphinxRunner() patched = patches( - "os.path", + "pathlib", ("SphinxRunner.args", dict(new_callable=PropertyMock)), prefix="tools.docs.sphinx_runner") - with patched as (m_path, m_args): + with patched as (m_plib, m_args): assert ( runner.descriptor_path - == m_path.abspath.return_value) + == m_plib.Path.return_value) assert ( - list(m_path.abspath.call_args) + list(m_plib.Path.call_args) == [(m_args.return_value.descriptor_path,), {}]) assert "descriptor_path" not in runner.__dict__ @@ -177,29 +192,31 @@ def test_sphinx_runner_docs_tag(patches): def test_sphinx_runner_html_dir(patches): runner = sphinx_runner.SphinxRunner() patched = patches( - "os.path", ("SphinxRunner.build_dir", dict(new_callable=PropertyMock)), - ("SphinxRunner.args", dict(new_callable=PropertyMock)), prefix="tools.docs.sphinx_runner") - with patched as (m_path, m_build, m_args): - assert runner.html_dir == m_path.join.return_value + with patched as (m_build, ): + assert runner.html_dir == m_build.return_value.joinpath.return_value assert ( - list(m_path.join.call_args) - == [(m_build.return_value, 'generated/html'), {}]) - + list(m_build.return_value.joinpath.call_args) + == [('generated', 'html'), {}]) assert "html_dir" in 
runner.__dict__ def test_sphinx_runner_output_filename(patches): runner = sphinx_runner.SphinxRunner() patched = patches( + "pathlib", ("SphinxRunner.args", dict(new_callable=PropertyMock)), prefix="tools.docs.sphinx_runner") - with patched as (m_args, ): - assert runner.output_filename == m_args.return_value.output_filename + with patched as (m_plib, m_args): + assert runner.output_filename == m_plib.Path.return_value + + assert ( + list(m_plib.Path.call_args) + == [(m_args.return_value.output_filename, ), {}]) assert "output_filename" not in runner.__dict__ @@ -246,41 +263,42 @@ def test_sphinx_runner_release_level(patches, docs_tag): def test_sphinx_runner_rst_dir(patches, rst_tar): runner = sphinx_runner.SphinxRunner() patched = patches( - "os.path", - "tarfile", + "pathlib", + "utils", ("SphinxRunner.build_dir", dict(new_callable=PropertyMock)), ("SphinxRunner.rst_tar", dict(new_callable=PropertyMock)), prefix="tools.docs.sphinx_runner") - with patched as (m_path, m_tar, m_dir, m_rst): + with patched as (m_plib, m_utils, m_dir, m_rst): m_rst.return_value = rst_tar - assert runner.rst_dir == m_path.join.return_value + assert runner.rst_dir == m_dir.return_value.joinpath.return_value assert ( - list(m_path.join.call_args) - == [(m_dir.return_value, 'generated/rst'), {}]) + list(m_dir.return_value.joinpath.call_args) + == [('generated', 'rst'), {}]) if rst_tar: assert ( - list(m_tar.open.call_args) - == [(rst_tar,), {}]) - assert ( - list(m_tar.open.return_value.__enter__.return_value.extractall.call_args) - == [(), {'path': m_path.join.return_value}]) + list(m_utils.extract.call_args) + == [(m_dir.return_value.joinpath.return_value, rst_tar), {}]) else: - assert not m_tar.open.called + assert not m_utils.extract.called assert "rst_dir" in runner.__dict__ def test_sphinx_runner_rst_tar(patches): runner = sphinx_runner.SphinxRunner() patched = patches( + "pathlib", ("SphinxRunner.args", dict(new_callable=PropertyMock)), prefix="tools.docs.sphinx_runner") - with 
patched as (m_args, ): - assert runner.rst_tar == m_args.return_value.rst_tar + with patched as (m_plib, m_args): + assert runner.rst_tar == m_plib.Path.return_value + assert ( + list(m_plib.Path.call_args) + == [(m_args.return_value.rst_tar, ), {}]) assert "rst_tar" not in runner.__dict__ @@ -294,9 +312,9 @@ def test_sphinx_runner_sphinx_args(patches): with patched as (m_html, m_rst): assert ( runner.sphinx_args - == ['-W', '--keep-going', '--color', '-b', 'html', - m_rst.return_value, - m_html.return_value]) + == ('-W', '--keep-going', '--color', '-b', 'html', + str(m_rst.return_value), + str(m_html.return_value))) assert "sphinx_args" not in runner.__dict__ @@ -304,17 +322,17 @@ def test_sphinx_runner_sphinx_args(patches): def test_sphinx_runner_validator_path(patches): runner = sphinx_runner.SphinxRunner() patched = patches( - "os.path", + "pathlib", ("SphinxRunner.args", dict(new_callable=PropertyMock)), prefix="tools.docs.sphinx_runner") - with patched as (m_path, m_args): + with patched as (m_plib, m_args): assert ( runner.validator_path - == m_path.abspath.return_value) + == m_plib.Path.return_value) assert ( - list(m_path.abspath.call_args) + list(m_plib.Path.call_args) == [(m_args.return_value.validator_path,), {}]) assert "validator_path" not in runner.__dict__ @@ -322,35 +340,35 @@ def test_sphinx_runner_validator_path(patches): def test_sphinx_runner_version_file(patches): runner = sphinx_runner.SphinxRunner() patched = patches( + "pathlib", ("SphinxRunner.args", dict(new_callable=PropertyMock)), prefix="tools.docs.sphinx_runner") - with patched as (m_args, ): - assert runner.version_file == m_args.return_value.version_file + with patched as (m_plib, m_args): + assert runner.version_file == m_plib.Path.return_value + assert ( + list(m_plib.Path.call_args) + == [(m_args.return_value.version_file, ), {}]) assert "version_file" not in runner.__dict__ def test_sphinx_runner_version_number(patches): runner = sphinx_runner.SphinxRunner() patched = patches( - 
"open", ("SphinxRunner.version_file", dict(new_callable=PropertyMock)), prefix="tools.docs.sphinx_runner") - with patched as (m_open, m_file): + with patched as (m_file, ): assert ( runner.version_number - == m_open.return_value.__enter__.return_value.read.return_value.strip.return_value) + == m_file.return_value.read_text.return_value.strip.return_value) assert ( - list(m_open.call_args) - == [(m_file.return_value,), {}]) - assert ( - list(m_open.return_value.__enter__.return_value.read.call_args) + list(m_file.return_value.read_text.call_args) == [(), {}]) assert ( - list(m_open.return_value.__enter__.return_value.read.return_value.strip.call_args) + list(m_file.return_value.read_text.return_value.strip.call_args) == [(), {}]) assert "version_number" in runner.__dict__ @@ -378,10 +396,19 @@ def test_sphinx_runner_version_string(patches, docs_tag): assert "version_string" not in runner.__dict__ -def test_sphinx_runner_add_arguments(): +def test_sphinx_runner_add_arguments(patches): runner = sphinx_runner.SphinxRunner() parser = MagicMock() - runner.add_arguments(parser) + patched = patches( + "runner.Runner.add_arguments", + prefix="tools.docs.sphinx_runner") + + with patched as (m_super, ): + runner.add_arguments(parser) + + assert ( + list(m_super.call_args) + == [(parser, ), {}]) assert ( list(list(c) for c in parser.add_argument.call_args_list) == [[('--build_sha',), {}], @@ -464,8 +491,6 @@ def test_sphinx_runner_build_summary(patches): def test_sphinx_runner_check_env(patches, py_compat, release_level, version_number, docs_tag, current): runner = sphinx_runner.SphinxRunner() patched = patches( - "open", - "os.path", "platform", ("SphinxRunner.configs", dict(new_callable=PropertyMock)), ("SphinxRunner.version_number", dict(new_callable=PropertyMock)), @@ -480,12 +505,12 @@ def test_sphinx_runner_check_env(patches, py_compat, release_level, version_numb and (f"v{version_number}" != docs_tag or version_number not in current))) - with patched as (m_open, m_path, 
m_platform, m_configs, m_version, m_tag, m_py, m_rst): + with patched as (m_platform, m_configs, m_version, m_tag, m_py, m_rst): m_py.return_value = py_compat m_configs.return_value.__getitem__.return_value = release_level m_version.return_value = version_number m_tag.return_value = docs_tag - m_open.return_value.__enter__.return_value.read.return_value = current + m_rst.return_value.joinpath.return_value.read_text.return_value = current if fails: with pytest.raises(sphinx_runner.SphinxEnvError) as e: @@ -498,15 +523,12 @@ def test_sphinx_runner_check_env(patches, py_compat, release_level, version_numb e.value.args == ("ERROR: python version must be >= 3.8, " f"you have {m_platform.python_version.return_value}", )) - assert not m_open.called return if release_level != "tagged": - assert not m_open.called return if f"v{version_number}" != docs_tag: - assert not m_open.called assert ( e.value.args == ("Given git tag does not match the VERSION file content:" @@ -514,14 +536,8 @@ def test_sphinx_runner_check_env(patches, py_compat, release_level, version_numb return assert ( - list(m_open.call_args) - == [(m_path.join.return_value,), {}]) - assert ( - list(m_path.join.call_args) - == [(m_rst.return_value, "version_history/current.rst"), {}]) - assert ( - list(m_open.return_value.__enter__.return_value.read.call_args) - == [(), {}]) + list(m_rst.return_value.joinpath.call_args) + == [("version_history", "current.rst"), {}]) if version_number not in current: assert ( @@ -529,6 +545,27 @@ def test_sphinx_runner_check_env(patches, py_compat, release_level, version_numb == (f"Git tag ({version_number}) not found in version_history/current.rst", )) +@pytest.mark.parametrize("exists", [True, False]) +def test_sphinx_runner_cleanup(patches, exists): + runner = sphinx_runner.SphinxRunner() + patched = patches( + ("SphinxRunner.tempdir", dict(new_callable=PropertyMock)), + prefix="tools.docs.sphinx_runner") + + with patched as (m_temp, ): + if exists: + 
runner.__dict__["tempdir"] = m_temp.return_value + assert not runner.cleanup() + + assert not "tempdir" in runner.__dict__ + if exists: + assert ( + list(m_temp.return_value.cleanup.call_args) + == [(), {}]) + else: + assert not m_temp.called + + def test_sphinx_runner_create_tarball(patches): runner = sphinx_runner.SphinxRunner() patched = patches( @@ -548,41 +585,9 @@ def test_sphinx_runner_create_tarball(patches): == [(m_html.return_value,), {'arcname': '.'}]) -def test_sphinx_runner_run(patches): - runner = sphinx_runner.SphinxRunner() - patched = patches( - "tempfile", - "SphinxRunner._run", - prefix="tools.docs.sphinx_runner") - - with patched as (m_tmp, m_run): - assert runner.run() == m_run.return_value - - assert ( - list(m_run.call_args) - == [(m_tmp.TemporaryDirectory.return_value.__enter__.return_value,), {}]) - - -@pytest.mark.parametrize("color", [None, "COLOR"]) -def test_sphinx_runner__color(patches, color): - runner = sphinx_runner.SphinxRunner() - patched = patches( - "Style", - ("SphinxRunner.colors", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_style, m_colors): - assert ( - runner._color("MSG", color) - == f"{m_colors.return_value.__getitem__.return_value}MSG{m_style.RESET_ALL}") - assert ( - list(m_colors.return_value.__getitem__.call_args) - == [(color or "chrome",), {}]) - - @pytest.mark.parametrize("check_fails", [True, False]) @pytest.mark.parametrize("build_fails", [True, False]) -def test_sphinx_runner__run(patches, check_fails, build_fails): +def test_sphinx_runner_run(patches, check_fails, build_fails): runner = sphinx_runner.SphinxRunner() patched = patches( "print", @@ -597,6 +602,8 @@ def test_sphinx_runner__run(patches, check_fails, build_fails): def _raise(error): raise error + assert runner.run.__wrapped__.__cleansup__ + with patched as (m_print, m_os, m_summary, m_check, m_build, m_create, m_config): if check_fails: _check_error = sphinx_runner.SphinxEnvError("CHECK FAILED") @@ 
-604,17 +611,14 @@ def _raise(error): if build_fails: _build_error = sphinx_runner.SphinxBuildError("BUILD FAILED") m_build.side_effect = lambda: _raise(_build_error) - assert runner._run("BUILD_DIR") == (1 if (check_fails or build_fails) else None) + assert runner.run() == (1 if (check_fails or build_fails) else None) - assert ( - runner._build_dir - == "BUILD_DIR") assert ( list(m_check.call_args) == [(), {}]) assert ( list(m_os.environ.__setitem__.call_args) - == [('ENVOY_DOCS_BUILD_CONFIG', m_config.return_value), {}]) + == [('ENVOY_DOCS_BUILD_CONFIG', str(m_config.return_value)), {}]) if check_fails: assert ( @@ -645,6 +649,23 @@ def _raise(error): == [(), {}]) +@pytest.mark.parametrize("color", [None, "COLOR"]) +def test_sphinx_runner__color(patches, color): + runner = sphinx_runner.SphinxRunner() + patched = patches( + "Style", + ("SphinxRunner.colors", dict(new_callable=PropertyMock)), + prefix="tools.docs.sphinx_runner") + + with patched as (m_style, m_colors): + assert ( + runner._color("MSG", color) + == f"{m_colors.return_value.__getitem__.return_value}MSG{m_style.RESET_ALL}") + assert ( + list(m_colors.return_value.__getitem__.call_args) + == [(color or "chrome",), {}]) + + def test_sphinx_runner_main(command_main): command_main( sphinx_runner.main, diff --git a/tools/extensions/BUILD b/tools/extensions/BUILD index b3f2dcbecaddb..52147a0e6b446 100644 --- a/tools/extensions/BUILD +++ b/tools/extensions/BUILD @@ -9,6 +9,8 @@ envoy_package() envoy_py_binary( name = "tools.extensions.extensions_check", data = [ + "//contrib:contrib_build_config.bzl", + "//contrib:extensions_metadata.yaml", "//source/extensions:extensions_metadata.yaml", "//source/extensions:extensions_build_config.bzl", "//test/extensions/filters/network/common/fuzz:uber_per_readfilter.cc", diff --git a/tools/extensions/extensions_check.py b/tools/extensions/extensions_check.py index c9cddf61a35e6..981b05b514280 100644 --- a/tools/extensions/extensions_check.py +++ 
b/tools/extensions/extensions_check.py @@ -6,13 +6,15 @@ import re import sys from functools import cached_property +from importlib.abc import Loader from importlib.util import spec_from_loader, module_from_spec -from importlib.machinery import SourceFileLoader +from importlib.machinery import ModuleSpec, SourceFileLoader from typing import Iterator from tools.base import checker, utils BUILD_CONFIG_PATH = "source/extensions/extensions_build_config.bzl" +CONTRIB_BUILD_CONFIG_PATH = "contrib/contrib_build_config.bzl" BUILTIN_EXTENSIONS = ( "envoy.request_id.uuid", "envoy.upstreams.tcp.generic", "envoy.transport_sockets.tls", @@ -52,7 +54,7 @@ "envoy.retry_host_predicates", "envoy.retry_priorities", "envoy.stats_sinks", "envoy.thrift_proxy.filters", "envoy.tracers", "envoy.transport_sockets.downstream", "envoy.transport_sockets.upstream", "envoy.tls.cert_validator", "envoy.upstreams", - "envoy.wasm.runtime") + "envoy.wasm.runtime", "envoy.common.key_value") EXTENSION_STATUS_VALUES = ( # This extension is stable and is expected to be production usable. @@ -69,6 +71,11 @@ FUZZ_TEST_PATH = "test/extensions/filters/network/common/fuzz/uber_per_readfilter.cc" METADATA_PATH = "source/extensions/extensions_metadata.yaml" +CONTRIB_METADATA_PATH = "contrib/extensions_metadata.yaml" + + +class ExtensionsConfigurationError(Exception): + pass class ExtensionsChecker(checker.Checker): @@ -79,18 +86,18 @@ class ExtensionsChecker(checker.Checker): @cached_property def all_extensions(self) -> set: - return set(self.configured_extensions.keys()) | set(BUILTIN_EXTENSIONS) + return set(self.configured_extensions.keys()) | set( + self.configured_contrib_extensions.keys()) | set(BUILTIN_EXTENSIONS) @cached_property def configured_extensions(self) -> dict: - # source/extensions/extensions_build_config.bzl must have a - # .bzl suffix for Starlark import, so we are forced to do this workaround. 
- _extensions_build_config_spec = spec_from_loader( - "extensions_build_config", - SourceFileLoader("extensions_build_config", BUILD_CONFIG_PATH)) - extensions_build_config = module_from_spec(_extensions_build_config_spec) - _extensions_build_config_spec.loader.exec_module(extensions_build_config) - return extensions_build_config.EXTENSIONS + return ExtensionsChecker._load_build_config( + "extensions_build_config", BUILD_CONFIG_PATH, "EXTENSIONS") + + @cached_property + def configured_contrib_extensions(self) -> dict: + return ExtensionsChecker._load_build_config( + "contrib_build_config", CONTRIB_BUILD_CONFIG_PATH, "CONTRIB_EXTENSIONS") @property def fuzzed_count(self) -> int: @@ -101,7 +108,12 @@ def fuzzed_count(self) -> int: @cached_property def metadata(self) -> dict: - return utils.from_yaml(METADATA_PATH) + result = utils.from_yaml(METADATA_PATH) + result.update(utils.from_yaml(CONTRIB_METADATA_PATH)) + if not isinstance(result, dict): + raise ExtensionsConfigurationError( + f"Unable to parse metadata: {METADATA_PATH} {CONTRIB_METADATA_PATH}") + return result @property def robust_to_downstream_count(self) -> int: @@ -111,6 +123,23 @@ def robust_to_downstream_count(self) -> int: if "network" in ext and data["security_posture"] == "robust_to_untrusted_downstream" ]) + @staticmethod + def _load_build_config(name, build_config_path, dictionary_name) -> dict: + # build configs must have a .bzl suffix for Starlark import, so we are forced to do this + # workaround. 
+ _extensions_build_config_spec = spec_from_loader( + name, SourceFileLoader(name, build_config_path)) + + if not isinstance(_extensions_build_config_spec, ModuleSpec): + raise ExtensionsConfigurationError(f"Unable to parse build config {build_config_path}") + extensions_build_config = module_from_spec(_extensions_build_config_spec) + + if not isinstance(_extensions_build_config_spec.loader, Loader): + raise ExtensionsConfigurationError(f"Unable to parse build config {build_config_path}") + + _extensions_build_config_spec.loader.exec_module(extensions_build_config) + return getattr(extensions_build_config, dictionary_name) + def check_fuzzed(self) -> None: if self.robust_to_downstream_count == self.fuzzed_count: return @@ -149,7 +178,9 @@ def _check_metadata_categories(self, extension: str) -> Iterator[str]: categories = self.metadata[extension].get("categories", ()) for cat in categories: if cat not in self.extension_categories: - yield f"Unknown extension category for {extension}: {cat}" + yield ( + f"Unknown extension category for {extension}: {cat}. " + "Please add it to tools/extensions/extensions_check.py") if not categories: yield ( f"Missing extension category for {extension}. 
" diff --git a/tools/extensions/tests/test_extensions_check.py b/tools/extensions/tests/test_extensions_check.py index ed1ede98d5af9..880cc1b503301 100644 --- a/tools/extensions/tests/test_extensions_check.py +++ b/tools/extensions/tests/test_extensions_check.py @@ -1,5 +1,6 @@ import types -from unittest.mock import patch, PropertyMock +from importlib.machinery import ModuleSpec +from unittest.mock import patch, PropertyMock, call import pytest @@ -25,22 +26,36 @@ def test_extensions_checker_all_extensions(): assert ( result - == set(_configured.keys()) | set(extensions_check.BUILTIN_EXTENSIONS)) + == set(_configured.keys()) | set(extensions_check.BUILTIN_EXTENSIONS) | set(checker.configured_contrib_extensions.keys())) assert "all_extensions" in checker.__dict__ -def test_extensions_checker_configured_extensions(patches): +@pytest.mark.parametrize("is_module", [True, False]) +@pytest.mark.parametrize("is_loader", [True, False]) +def test_extensions_checker_configured_extensions(patches, is_module, is_loader): checker = extensions_check.ExtensionsChecker() patched = patches( + "isinstance", "spec_from_loader", "SourceFileLoader", "module_from_spec", prefix="tools.extensions.extensions_check") - with patched as (m_spec, m_loader, m_module): - assert ( - checker.configured_extensions - == m_module.return_value.EXTENSIONS) + def _is_instance(obj, types): + if types == ModuleSpec: + return is_module + return is_loader + + with patched as (m_inst, m_spec, m_loader, m_module): + m_inst.side_effect = _is_instance + + if is_module and is_loader: + assert ( + checker.configured_extensions + == m_module.return_value.EXTENSIONS) + else: + with pytest.raises(extensions_check.ExtensionsConfigurationError) as e: + checker.configured_extensions assert ( list(m_spec.call_args) @@ -48,9 +63,20 @@ def test_extensions_checker_configured_extensions(patches): assert ( list(m_loader.call_args) == [('extensions_build_config', extensions_check.BUILD_CONFIG_PATH), {}]) + + if not 
is_module: + assert not m_module.called + assert not m_spec.return_value.loader.exec_module.called + return + assert ( list(m_module.call_args) == [(m_spec.return_value,), {}]) + + if not is_loader: + assert not m_spec.return_value.loader.exec_module.called + return + assert ( list(m_spec.return_value.loader.exec_module.call_args) == [(m_module.return_value,), {}]) @@ -88,20 +114,34 @@ def test_extensions_fuzzed_count(patches): assert "fuzzed_count" not in checker.__dict__ -def test_extensions_metadata(patches): +@pytest.mark.parametrize("is_dict", [True, False]) +def test_extensions_metadata(patches, is_dict): checker = extensions_check.ExtensionsChecker() patched = patches( + "isinstance", "utils", prefix="tools.extensions.extensions_check") - with patched as (m_utils, ): - assert ( - checker.metadata - == m_utils.from_yaml.return_value) + with patched as (m_inst, m_utils): + m_inst.return_value = is_dict + + if is_dict: + assert ( + checker.metadata + == m_utils.from_yaml.return_value) + else: + with pytest.raises(extensions_check.ExtensionsConfigurationError) as e: + checker.metadata assert ( - list(m_utils.from_yaml.call_args) - == [(extensions_check.METADATA_PATH,), {}]) + list(m_utils.from_yaml.call_args_list) + == [call(extensions_check.METADATA_PATH), call(extensions_check.CONTRIB_METADATA_PATH)]) + + if not is_dict: + assert ( + e.value.args[0] + == f'Unable to parse metadata: {extensions_check.METADATA_PATH} {extensions_check.CONTRIB_METADATA_PATH}') + return assert "metadata" in checker.__dict__ @@ -299,7 +339,8 @@ def test_extensions__check_metadata_categories(ext_cats, all_cats): if wrong_cats: assert ( result - == [f'Unknown extension category for EXTENSION: {cat}' for cat in wrong_cats]) + == [f'Unknown extension category for EXTENSION: {cat}. 
' + 'Please add it to tools/extensions/extensions_check.py' for cat in wrong_cats]) return assert result == [] diff --git a/tools/gen_compilation_database.py b/tools/gen_compilation_database.py index a80fea082417a..8c13694b15d2d 100755 --- a/tools/gen_compilation_database.py +++ b/tools/gen_compilation_database.py @@ -93,6 +93,13 @@ def fix_compilation_database(args, db): parser.add_argument('--include_headers', action='store_true') parser.add_argument('--vscode', action='store_true') parser.add_argument( - 'bazel_targets', nargs='*', default=["//source/...", "//test/...", "//tools/..."]) + 'bazel_targets', + nargs='*', + default=[ + "//source/...", + "//test/...", + "//tools/...", + "//contrib/...", + ]) args = parser.parse_args() fix_compilation_database(args, generate_compilation_database(args)) diff --git a/tools/github/BUILD b/tools/github/BUILD new file mode 100644 index 0000000000000..779d1695d3b7c --- /dev/null +++ b/tools/github/BUILD @@ -0,0 +1 @@ +licenses(["notice"]) # Apache 2 diff --git a/tools/github/requirements.txt b/tools/github/requirements.txt deleted file mode 100644 index 603347540d225..0000000000000 --- a/tools/github/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -PyGithub==1.55 \ - --hash=sha256:2caf0054ea079b71e539741ae56c5a95e073b81fa472ce222e81667381b9601b \ - --hash=sha256:1bbfff9372047ff3f21d5cd8e07720f3dbfdaf6462fcaed9d815f528f1ba7283 diff --git a/tools/gpg/BUILD b/tools/gpg/BUILD deleted file mode 100644 index 50a3dd91ff14e..0000000000000 --- a/tools/gpg/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -load("//bazel:envoy_build_system.bzl", "envoy_package") -load("@gpg_pip3//:requirements.bzl", "requirement") -load("//tools/base:envoy_python.bzl", "envoy_py_library") - -licenses(["notice"]) # Apache 2 - -envoy_package() - -envoy_py_library( - name = "tools.gpg.identity", - deps = [requirement("python-gnupg")], -) diff --git a/tools/gpg/identity.py b/tools/gpg/identity.py deleted file mode 100644 index 9d8a51aa278ce..0000000000000 --- 
a/tools/gpg/identity.py +++ /dev/null @@ -1,148 +0,0 @@ -import logging -import os -import pwd -import shutil -from functools import cached_property -from email.utils import formataddr, parseaddr -from typing import Optional - -import gnupg - - -class GPGError(Exception): - pass - - -class GPGIdentity(object): - """A GPG identity with a signing key - - The signing key is found either by matching provided name/email, - or by retrieving the first private key. - """ - - def __init__( - self, - name: Optional[str] = None, - email: Optional[str] = None, - log: Optional[logging.Logger] = None): - self._provided_name = name - self._provided_email = email - self._log = log - - def __str__(self) -> str: - return self.uid - - @cached_property - def email(self) -> str: - """Email parsed from the signing key""" - return parseaddr(self.uid)[1] - - @property - def fingerprint(self) -> str: - """GPG key fingerprint""" - return self.signing_key["fingerprint"] - - @cached_property - def gpg(self) -> gnupg.GPG: - return gnupg.GPG() - - @cached_property - def gpg_bin(self) -> str: - return shutil.which("gpg2") or shutil.which("gpg") - - @property - def gnupg_home(self) -> str: - return os.path.join(self.home, ".gnupg") - - @cached_property - def home(self) -> str: - """Gets *and sets if required* the `HOME` env var""" - os.environ["HOME"] = os.environ.get("HOME", pwd.getpwuid(os.getuid()).pw_dir) - return os.environ["HOME"] - - @cached_property - def log(self) -> logging.Logger: - return self._log or logging.getLogger(self.__class__.__name__) - - @property - def provided_email(self) -> Optional[str]: - """Provided email for the identity""" - return self._provided_email - - @cached_property - def provided_id(self) -> Optional[str]: - """Provided name and/or email for the identity""" - if not (self.provided_name or self.provided_email): - return - return ( - formataddr(self.provided_name, self.provided_email) if - (self.provided_name and self.provided_email) else - (self.provided_name 
or self.provided_email)) - - @property - def provided_name(self) -> Optional[str]: - """Provided name for the identity""" - return self._provided_name - - @cached_property - def name(self) -> str: - """Name parsed from the signing key""" - return parseaddr(self.uid)[0] - - @cached_property - def signing_key(self) -> dict: - """A `dict` representing the GPG key to sign with""" - # if name and/or email are provided the list of keys is pre-filtered - # but we still need to figure out which uid matched for the found key - for key in self.gpg.list_keys(True, keys=self.provided_id): - key = self.match(key) - if key: - return key - raise GPGError( - f"No key found for '{self.provided_id}'" if self.provided_id else "No available key") - - @property - def uid(self) -> str: - """UID of the identity's signing key""" - return self.signing_key["uid"] - - def match(self, key: dict) -> Optional[dict]: - """Match a signing key - - The key is found either by matching provided name/email - or the first available private key - - the matching `uid` (or first) is added as `uid` to the dict - """ - if self.provided_id: - key["uid"] = self._match_key(key["uids"]) - return key if key["uid"] else None - if self.log: - self.log.warning("No GPG name/email supplied, signing with first available key") - key["uid"] = key["uids"][0] - return key - - def _match_email(self, uids: list) -> Optional[str]: - """Match only the email""" - for uid in uids: - if parseaddr(uid)[1] == self.provided_email: - return uid - - def _match_key(self, uids: dict) -> Optional[str]: - """If either/both name or email are supplied it tries to match either/both""" - if self.provided_name and self.provided_email: - return self._match_uid(uids) - elif self.provided_name: - return self._match_name(uids) - elif self.provided_email: - return self._match_email(uids) - - def _match_name(self, uids: list) -> Optional[str]: - """Match only the name""" - for uid in uids: - if parseaddr(uid)[0] == self.provided_name: - return uid 
- - def _match_uid(self, uids: list) -> Optional[str]: - """Match the whole uid - ie `Name `""" - return self.provided_id if self.provided_id in uids else None diff --git a/tools/gpg/requirements.txt b/tools/gpg/requirements.txt deleted file mode 100644 index f405a325f70be..0000000000000 --- a/tools/gpg/requirements.txt +++ /dev/null @@ -1,10 +0,0 @@ -# -# This file is autogenerated by pip-compile -# To update, run: -# -# pip-compile --generate-hashes tools/gpg/requirements.txt -# -python-gnupg==0.4.7 \ - --hash=sha256:2061f56b1942c29b92727bf9aecbd3cea3893acc9cccbdc7eb4604285efe4ac7 \ - --hash=sha256:3ff5b1bf5e397de6e1fe41a7c0f403dad4e242ac92b345f440eaecfb72a7ebae - # via -r tools/gpg/requirements.txt diff --git a/tools/gpg/tests/test_identity.py b/tools/gpg/tests/test_identity.py deleted file mode 100644 index 7e191d929cf0a..0000000000000 --- a/tools/gpg/tests/test_identity.py +++ /dev/null @@ -1,433 +0,0 @@ -from unittest.mock import MagicMock, PropertyMock - -import pytest - -from tools.gpg import identity - - -@pytest.mark.parametrize("name", ["NAME", None]) -@pytest.mark.parametrize("email", ["EMAIL", None]) -@pytest.mark.parametrize("log", ["LOG", None]) -def test_identity_constructor(name, email, log): - gpg = identity.GPGIdentity(name, email, log) - assert gpg.provided_name == name - assert gpg.provided_email == email - assert gpg._log == log - - -def test_identity_dunder_str(patches): - gpg = identity.GPGIdentity() - patched = patches( - ("GPGIdentity.uid", dict(new_callable=PropertyMock)), - prefix="tools.gpg.identity") - - with patched as (m_uid, ): - m_uid.return_value = "SOME BODY" - assert str(gpg) == "SOME BODY" - - -def test_identity_email(patches): - gpg = identity.GPGIdentity() - patched = patches( - "parseaddr", - ("GPGIdentity.uid", dict(new_callable=PropertyMock)), - prefix="tools.gpg.identity") - - with patched as (m_parse, m_uid): - assert gpg.email == m_parse.return_value.__getitem__.return_value - - assert ( - 
list(m_parse.return_value.__getitem__.call_args) - == [(1,), {}]) - assert ( - list(m_parse.call_args) - == [(m_uid.return_value,), {}]) - assert "email" in gpg.__dict__ - - -def test_identity_fingerprint(patches): - gpg = identity.GPGIdentity() - patched = patches( - ("GPGIdentity.signing_key", dict(new_callable=PropertyMock)), - prefix="tools.gpg.identity") - - with patched as (m_key, ): - assert gpg.fingerprint == m_key.return_value.__getitem__.return_value - - assert ( - list(m_key.return_value.__getitem__.call_args) - == [('fingerprint',), {}]) - - assert "fingerprint" not in gpg.__dict__ - - -def test_identity_gpg(patches): - gpg = identity.GPGIdentity() - patched = patches( - "gnupg.GPG", - prefix="tools.gpg.identity") - - with patched as (m_gpg, ): - assert gpg.gpg == m_gpg.return_value - - assert ( - list(m_gpg.call_args) - == [(), {}]) - - assert "gpg" in gpg.__dict__ - - -def test_identity_gnupg_home(patches): - gpg = identity.GPGIdentity() - patched = patches( - "os", - ("GPGIdentity.home", dict(new_callable=PropertyMock)), - prefix="tools.gpg.identity") - - with patched as (m_os, m_home): - assert gpg.gnupg_home == m_os.path.join.return_value - - assert ( - list(m_os.path.join.call_args) - == [(m_home.return_value, '.gnupg'), {}]) - - assert "gnupg_home" not in gpg.__dict__ - - -@pytest.mark.parametrize("gpg", [None, "GPG"]) -@pytest.mark.parametrize("gpg2", [None, "GPG2"]) -def test_identity_gpg_bin(patches, gpg, gpg2): - gpg = identity.GPGIdentity() - patched = patches( - "shutil", - prefix="tools.gpg.identity") - - def _get_bin(_cmd): - if _cmd == "gpg2" and gpg2: - return gpg2 - if _cmd == "gpg" and gpg: - return gpg - - with patched as (m_shutil, ): - m_shutil.which.side_effect = _get_bin - assert gpg.gpg_bin == gpg2 or gpg - - if gpg2: - assert ( - list(list(c) for c in m_shutil.which.call_args_list) - == [[('gpg2',), {}]]) - return - assert ( - list(list(c) for c in m_shutil.which.call_args_list) - == [[('gpg2',), {}], [('gpg',), {}]]) - - -def 
test_identity_home(patches): - gpg = identity.GPGIdentity() - patched = patches( - "os", - "pwd", - prefix="tools.gpg.identity") - - with patched as (m_os, m_pwd): - assert gpg.home == m_os.environ.__getitem__.return_value - - assert ( - list(m_os.environ.__getitem__.call_args) - == [('HOME', ), {}]) - assert ( - list(m_os.environ.__setitem__.call_args) - == [('HOME', m_os.environ.get.return_value), {}]) - assert ( - list(m_os.environ.get.call_args) - == [('HOME', m_pwd.getpwuid.return_value.pw_dir), {}]) - assert ( - list(m_pwd.getpwuid.call_args) - == [(m_os.getuid.return_value,), {}]) - assert ( - list(m_os.getuid.call_args) - == [(), {}]) - - assert "home" in gpg.__dict__ - - -@pytest.mark.parametrize("log", ["LOGGER", None]) -def test_identity_log(patches, log): - gpg = identity.GPGIdentity() - patched = patches( - "logging", - prefix="tools.gpg.identity") - - gpg._log = log - - with patched as (m_log, ): - if log: - assert gpg.log == log - assert not m_log.getLogger.called - else: - assert gpg.log == m_log.getLogger.return_value - assert ( - list(m_log.getLogger.call_args) - == [(gpg.__class__.__name__, ), {}]) - - -@pytest.mark.parametrize("name", ["NAME", None]) -@pytest.mark.parametrize("email", ["EMAIL", None]) -def test_identity_identity_id(patches, name, email): - gpg = identity.GPGIdentity() - patched = patches( - "formataddr", - ("GPGIdentity.provided_name", dict(new_callable=PropertyMock)), - ("GPGIdentity.provided_email", dict(new_callable=PropertyMock)), - prefix="tools.gpg.identity") - - with patched as (m_format, m_name, m_email): - m_name.return_value = name - m_email.return_value = email - result = gpg.provided_id - - assert "provided_id" in gpg.__dict__ - - if name and email: - assert ( - list(m_format.call_args) - == [('NAME', 'EMAIL'), {}]) - assert result == m_format.return_value - return - - assert not m_format.called - assert result == name or email - - -def test_identity_name(patches): - gpg = identity.GPGIdentity() - patched = patches( 
- "parseaddr", - ("GPGIdentity.uid", dict(new_callable=PropertyMock)), - prefix="tools.gpg.identity") - - with patched as (m_parse, m_uid): - assert gpg.name == m_parse.return_value.__getitem__.return_value - - assert ( - list(m_parse.return_value.__getitem__.call_args) - == [(0,), {}]) - assert ( - list(m_parse.call_args) - == [(m_uid.return_value,), {}]) - assert "name" in gpg.__dict__ - - -@pytest.mark.parametrize("key", ["KEY1", "KEY2", "KEY5"]) -@pytest.mark.parametrize("name", ["NAME", None]) -@pytest.mark.parametrize("email", ["EMAIL", None]) -def test_identity_signing_key(patches, key, name, email): - packager = MagicMock() - gpg = identity.GPGIdentity() - _keys = ["KEY1", "KEY2", "KEY3"] - patched = patches( - "GPGIdentity.match", - ("GPGIdentity.gpg", dict(new_callable=PropertyMock)), - ("GPGIdentity.provided_id", dict(new_callable=PropertyMock)), - prefix="tools.gpg.identity") - - with patched as (m_match, m_gpg, m_id): - if not name and not email: - m_id.return_value = None - m_match.side_effect = lambda k: (k == key and f"MATCH {k}") - m_gpg.return_value.list_keys.return_value = _keys - if key in _keys: - assert gpg.signing_key == f"MATCH {key}" - _match_attempts = _keys[:_keys.index(key) + 1] - else: - with pytest.raises(identity.GPGError) as e: - gpg.signing_key - if name or email: - assert ( - e.value.args[0] - == f"No key found for '{m_id.return_value}'") - else: - assert ( - e.value.args[0] - == 'No available key') - _match_attempts = _keys - - assert ( - list(m_gpg.return_value.list_keys.call_args) - == [(True, ), dict(keys=m_id.return_value)]) - assert ( - list(list(c) for c in m_match.call_args_list) - == [[(k,), {}] for k in _match_attempts]) - - -def test_identity_uid(patches): - gpg = identity.GPGIdentity() - patched = patches( - ("GPGIdentity.signing_key", dict(new_callable=PropertyMock)), - prefix="tools.gpg.identity") - - with patched as (m_key, ): - assert gpg.uid == m_key.return_value.__getitem__.return_value - - assert ( - 
list(m_key.return_value.__getitem__.call_args) - == [('uid',), {}]) - - assert "uid" not in gpg.__dict__ - - -@pytest.mark.parametrize("name", ["NAME", None]) -@pytest.mark.parametrize("email", ["EMAIL", None]) -@pytest.mark.parametrize("match", ["MATCH", None]) -@pytest.mark.parametrize("log", [True, False]) -def test_identity_match(patches, name, email, match, log): - gpg = identity.GPGIdentity() - _keys = ["KEY1", "KEY2", "KEY3"] - patched = patches( - "GPGIdentity._match_key", - ("GPGIdentity.provided_id", dict(new_callable=PropertyMock)), - ("GPGIdentity.log", dict(new_callable=PropertyMock)), - prefix="tools.gpg.identity") - key = dict(uids=["UID1", "UID2"]) - - with patched as (m_match, m_id, m_log): - if not log: - m_log.return_value = None - m_match.return_value = match - m_id.return_value = name or email - result = gpg.match(key) - - if not name and not email: - assert not m_match.called - if log: - assert ( - list(m_log.return_value.warning.call_args) - == [('No GPG name/email supplied, signing with first available key',), {}]) - assert ( - result - == {'uids': ['UID1', 'UID2'], 'uid': 'UID1'}) - return - assert ( - list(m_match.call_args) - == [(key["uids"],), {}]) - if log: - assert not m_log.return_value.warning.called - if match: - assert ( - result - == {'uids': ['UID1', 'UID2'], 'uid': 'MATCH'}) - else: - assert not result - - -@pytest.mark.parametrize("uids", [[], ["UID1"], ["UID1", "UID2"]]) -@pytest.mark.parametrize("email", [None, "UID1", "UID1", "UID2", "UID3"]) -def test_identity__match_email(patches, uids, email): - gpg = identity.GPGIdentity() - patched = patches( - "parseaddr", - ("GPGIdentity.provided_email", dict(new_callable=PropertyMock)), - prefix="tools.gpg.identity") - - with patched as (m_parse, m_email): - m_parse.side_effect = lambda _email: ("NAME", _email) - m_email.return_value = email - result = gpg._match_email(uids) - - if email in uids: - assert result == email - assert ( - list(list(c) for c in m_parse.call_args_list) - 
== [[(uid,), {}] for uid in uids[:uids.index(email) + 1]]) - return - - assert not result - assert ( - list(list(c) for c in m_parse.call_args_list) - == [[(uid,), {}] for uid in uids]) - - -@pytest.mark.parametrize("name", ["NAME", None]) -@pytest.mark.parametrize("email", ["EMAIL", None]) -def test_identity__match_key(patches, name, email): - gpg = identity.GPGIdentity() - _keys = ["KEY1", "KEY2", "KEY3"] - patched = patches( - "GPGIdentity._match_email", - "GPGIdentity._match_name", - "GPGIdentity._match_uid", - ("GPGIdentity.provided_email", dict(new_callable=PropertyMock)), - ("GPGIdentity.provided_name", dict(new_callable=PropertyMock)), - prefix="tools.gpg.identity") - key = dict(uids=["UID1", "UID2"]) - - with patched as (m_email, m_name, m_uid, m_pemail, m_pname): - m_pemail.return_value = email - m_pname.return_value = name - result = gpg._match_key(key) - - if name and email: - assert ( - list(m_uid.call_args) - == [(dict(uids=key["uids"]),), {}]) - assert not m_email.called - assert not m_name.called - assert result == m_uid.return_value - elif name: - assert ( - list(m_name.call_args) - == [(dict(uids=key["uids"]),), {}]) - assert not m_email.called - assert not m_uid.called - assert result == m_name.return_value - elif email: - assert ( - list(m_email.call_args) - == [(dict(uids=key["uids"]),), {}]) - assert not m_name.called - assert not m_uid.called - assert result == m_email.return_value - - -@pytest.mark.parametrize("uids", [[], ["UID1"], ["UID1", "UID2"]]) -@pytest.mark.parametrize("name", [None, "UID1", "UID1", "UID2", "UID3"]) -def test_identity__match_name(patches, uids, name): - gpg = identity.GPGIdentity() - patched = patches( - "parseaddr", - ("GPGIdentity.provided_name", dict(new_callable=PropertyMock)), - prefix="tools.gpg.identity") - - with patched as (m_parse, m_name): - m_parse.side_effect = lambda _name: (_name, "EMAIL") - m_name.return_value = name - result = gpg._match_name(uids) - - if name in uids: - assert result == name - 
assert ( - list(list(c) for c in m_parse.call_args_list) - == [[(uid,), {}] for uid in uids[:uids.index(name) + 1]]) - return - - assert not result - assert ( - list(list(c) for c in m_parse.call_args_list) - == [[(uid,), {}] for uid in uids]) - - -@pytest.mark.parametrize("uid", ["UID1", "UID7"]) -def test_identity__match_uid(patches, uid): - gpg = identity.GPGIdentity() - uids = [f"UID{i}" for i in range(5)] - matches = uid in uids - patched = patches( - ("GPGIdentity.provided_id", dict(new_callable=PropertyMock)), - prefix="tools.gpg.identity") - - with patched as (m_id, ): - m_id.return_value = uid - if matches: - assert gpg._match_uid(uids) == uid - else: - assert not gpg._match_uid(uids) diff --git a/tools/proto_format/proto_sync.py b/tools/proto_format/proto_sync.py index 8a2c589772ae6..b3ea6686b6be9 100755 --- a/tools/proto_format/proto_sync.py +++ b/tools/proto_format/proto_sync.py @@ -41,6 +41,15 @@ 'validate/validate.proto', ] +# Each of the following contrib extensions are allowed to be in the v3 namespace. Indicate why. +CONTRIB_V3_ALLOW_LIST = [ + # Extensions moved from core to contrib. + 'envoy.extensions.filters.http.squash.v3', + 'envoy.extensions.filters.network.kafka_broker.v3', + 'envoy.extensions.filters.network.mysql_proxy.v3', + 'envoy.extensions.filters.network.rocketmq_proxy.v3', +] + BUILD_FILE_TEMPLATE = string.Template( """# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
@@ -54,7 +63,9 @@ IGNORED_V2_PROTOS = [ "envoy/config/accesslog/v2", "envoy/config/cluster/aggregate/v2alpha", + "envoy/config/cluster/dynamic_forward_proxy/v2alpha", "envoy/config/cluster/redis", + "envoy/config/common/dynamic_forward_proxy/v2alpha", "envoy/config/common/tap/v2alpha", "envoy/config/filter/dubbo/router/v2alpha1", "envoy/config/filter/http/adaptive_concurrency/v2alpha", @@ -160,8 +171,20 @@ def get_destination_path(src): if len(matches) != 1: raise RequiresReformatError( "Expect {} has only one package declaration but has {}".format(src, len(matches))) - return pathlib.Path(get_directory_from_package( - matches[0])).joinpath(src_path.name.split('.')[0] + ".proto") + package = matches[0] + dst_path = pathlib.Path( + get_directory_from_package(package)).joinpath(src_path.name.split('.')[0] + ".proto") + # contrib API files have the standard namespace but are in a contrib folder for clarity. + # The following prepends contrib for contrib packages so we wind up with the real final path. + if 'contrib' in src: + if 'v3alpha' not in package and 'v4alpha' not in package and package not in CONTRIB_V3_ALLOW_LIST: + raise ProtoSyncError( + "contrib extension package '{}' does not use v3alpha namespace. " + "Add to CONTRIB_V3_ALLOW_LIST with an explanation if this is on purpose.".format( + package)) + + dst_path = pathlib.Path('contrib').joinpath(dst_path) + return dst_path def get_abs_rel_destination_path(dst_root, src): @@ -273,6 +296,9 @@ def get_import_deps(proto_path): if import_path.startswith('udpa/annotations/'): imports.append('@com_github_cncf_udpa//udpa/annotations:pkg') continue + if import_path.startswith('xds/type/matcher/v3/'): + imports.append('@com_github_cncf_udpa//xds/type/matcher/v3:pkg') + continue # Special case handling for UDPA core. 
if import_path.startswith('xds/core/v3/'): imports.append('@com_github_cncf_udpa//xds/core/v3:pkg') @@ -282,7 +308,7 @@ def get_import_deps(proto_path): imports.append( external_proto_deps.EXTERNAL_PROTO_IMPORT_BAZEL_DEP_MAP[import_path]) continue - if import_path.startswith('envoy/'): + if import_path.startswith('envoy/') or import_path.startswith('contrib/'): # Ignore package internal imports. if os.path.dirname(proto_path).endswith(os.path.dirname(import_path)): continue @@ -390,6 +416,9 @@ def generate_current_api_dir(api_dir, dst_dir): api_dir: the original api directory dst_dir: the api directory to be compared in temporary directory """ + contrib_dst = dst_dir.joinpath("contrib") + shutil.copytree(str(api_dir.joinpath("contrib")), str(contrib_dst)) + dst = dst_dir.joinpath("envoy") shutil.copytree(str(api_dir.joinpath("envoy")), str(dst)) diff --git a/tools/protodoc/BUILD b/tools/protodoc/BUILD index 8ac4ca1c43d60..0636bd08fbc75 100644 --- a/tools/protodoc/BUILD +++ b/tools/protodoc/BUILD @@ -23,6 +23,7 @@ py_binary( name = "protodoc", srcs = ["protodoc.py"], data = [ + "//contrib:extensions_metadata.yaml", "//docs:protodoc_manifest.yaml", "//docs:v2_mapping.json", "//source/extensions:extensions_metadata.yaml", diff --git a/tools/protodoc/protodoc.py b/tools/protodoc/protodoc.py index a95efe31b4b46..a8d45c7ccd1e5 100755 --- a/tools/protodoc/protodoc.py +++ b/tools/protodoc/protodoc.py @@ -56,7 +56,7 @@ .. _extension_{{extension}}: This extension may be referenced by the qualified name ``{{extension}}`` - +{{contrib}} .. note:: {{status}} @@ -77,12 +77,22 @@ .. _extension_category_{{category}}: .. 
tip:: +{% if extensions %} This extension category has the following known extensions: {% for ext in extensions %} - :ref:`{{ext}} ` {% endfor %} +{% endif %} +{% if contrib_extensions %} + The following extensions are available in :ref:`contrib ` images only: + +{% for ext in contrib_extensions %} + - :ref:`{{ext}} ` +{% endfor %} +{% endif %} + """) # A map from the extension security postures (as defined in the @@ -117,12 +127,20 @@ r = runfiles.Create() EXTENSION_DB = utils.from_yaml(r.Rlocation("envoy/source/extensions/extensions_metadata.yaml")) +CONTRIB_EXTENSION_DB = utils.from_yaml(r.Rlocation("envoy/contrib/extensions_metadata.yaml")) + # create an index of extension categories from extension db -EXTENSION_CATEGORIES = {} -for _k, _v in EXTENSION_DB.items(): - for _cat in _v['categories']: - EXTENSION_CATEGORIES.setdefault(_cat, []).append(_k) +def build_categories(extensions_db): + ret = {} + for _k, _v in extensions_db.items(): + for _cat in _v['categories']: + ret.setdefault(_cat, []).append(_k) + return ret + + +EXTENSION_CATEGORIES = build_categories(EXTENSION_DB) +CONTRIB_EXTENSION_CATEGORIES = build_categories(CONTRIB_EXTENSION_DB) V2_LINK_TEMPLATE = Template( """ @@ -241,18 +259,29 @@ def format_extension(extension): RST formatted extension description. """ try: - extension_metadata = EXTENSION_DB[extension] + extension_metadata = EXTENSION_DB.get(extension, None) + contrib = '' + if extension_metadata is None: + extension_metadata = CONTRIB_EXTENSION_DB[extension] + contrib = """ + +.. note:: + This extension is only available in :ref:`contrib ` images. 
+ +""" status = EXTENSION_STATUS_VALUES.get(extension_metadata.get('status'), '') security_posture = EXTENSION_SECURITY_POSTURES[extension_metadata['security_posture']] categories = extension_metadata["categories"] except KeyError as e: sys.stderr.write( - f"\n\nDid you forget to add '{extension}' to source/extensions/extensions_build_config.bzl " - "or source/extensions/extensions_metadata.yaml?\n\n") + f"\n\nDid you forget to add '{extension}' to extensions_build_config.bzl, " + "extensions_metadata.yaml, contrib_build_config.bzl, " + "or contrib/extensions_metadata.yaml?\n\n") exit(1) # Raising the error buries the above message in tracebacks. return EXTENSION_TEMPLATE.render( extension=extension, + contrib=contrib, status=status, security_posture=security_posture, categories=categories) @@ -267,12 +296,14 @@ def format_extension_category(extension_category): Returns: RST formatted extension category description. """ - try: - extensions = EXTENSION_CATEGORIES[extension_category] - except KeyError as e: + extensions = EXTENSION_CATEGORIES.get(extension_category, []) + contrib_extensions = CONTRIB_EXTENSION_CATEGORIES.get(extension_category, []) + if not extensions and not contrib_extensions: raise ProtodocError(f"\n\nUnable to find extension category: {extension_category}\n\n") return EXTENSION_CATEGORY_TEMPLATE.render( - category=extension_category, extensions=sorted(extensions)) + category=extension_category, + extensions=sorted(extensions), + contrib_extensions=sorted(contrib_extensions)) def format_header_from_file(style, source_code_info, proto_name, v2_link): diff --git a/tools/protoxform/protoprint.py b/tools/protoxform/protoprint.py index 38c3b3e11338e..b30058b37a68e 100755 --- a/tools/protoxform/protoprint.py +++ b/tools/protoxform/protoprint.py @@ -268,7 +268,7 @@ def camel_case(s): # import_deprecation_proto is True or the proto is frozen. 
continue infra_imports.append(d) - elif d.startswith('envoy/'): + elif d.startswith('envoy/') or d.startswith('contrib/'): # We ignore existing envoy/ imports, since these are computed explicitly # from type_dependencies. pass diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index cf48779ec8bac..96dda9ee1dc88 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -15,14 +15,17 @@ ARN ASAN ASCII ASM +ASSERTed ASSERTs AST AWS BACKTRACE BSON BPF +btree CAS CB +cbor CDN CDS CEL @@ -41,6 +44,8 @@ RTP STATNAME SkyWalking TIDs +Timedout +WRSQ WASI ceil CCM @@ -183,9 +188,11 @@ Kille LBs LC LDS +LEDS LEV LF LHS +libsxg LLVM LPT LRS @@ -193,6 +200,7 @@ Loggable MB MD MERCHANTABILITY +Merkle MGET MQ MSDN @@ -245,6 +253,7 @@ PGV PID PKTINFO PNG +Pointwise PostCBs PREBIND PRNG @@ -321,6 +330,7 @@ STRLEN STS SVG SVID +SXG Symbolizer TBD TCLAP @@ -597,6 +607,7 @@ dubbo dup durations dynamodb +eg emplace emplaced emscripten @@ -762,6 +773,7 @@ lexically libc libevent libprotobuf +librdkafka libtool libstdc lifecycle @@ -1002,6 +1014,7 @@ refcount referencee referer refetch +refvec regex regexes reified @@ -1260,6 +1273,7 @@ wildcards winsock workspace writev +wrsq xDS xDSes xdstp diff --git a/tools/testdata/api_proto_breaking_change_detector/BUILD b/tools/testdata/api_proto_breaking_change_detector/BUILD new file mode 100644 index 0000000000000..0a9739fd8fb27 --- /dev/null +++ b/tools/testdata/api_proto_breaking_change_detector/BUILD @@ -0,0 +1,38 @@ +licenses(["notice"]) # Apache 2 + +filegroup( + name = "proto_breaking_change_detector_testdata", + srcs = [ + "allowed/test_add_comment.proto", + "allowed/test_add_comment_changed", + "allowed/test_add_enum_value.proto", + "allowed/test_add_enum_value_changed", + "allowed/test_add_field.proto", + "allowed/test_add_field_changed", + "allowed/test_add_option.proto", + "allowed/test_add_option_changed", + 
"allowed/test_remove_and_reserve_field.proto", + "allowed/test_remove_and_reserve_field_changed", + "breaking/test_change_field_from_oneof.proto", + "breaking/test_change_field_from_oneof_changed", + "breaking/test_change_field_id.proto", + "breaking/test_change_field_id_changed", + "breaking/test_change_field_name.proto", + "breaking/test_change_field_name_changed", + "breaking/test_change_field_plurality.proto", + "breaking/test_change_field_plurality_changed", + "breaking/test_change_field_to_oneof.proto", + "breaking/test_change_field_to_oneof_changed", + "breaking/test_change_field_type.proto", + "breaking/test_change_field_type_changed", + "breaking/test_change_package_name.proto", + "breaking/test_change_package_name_changed", + "breaking/test_change_pgv_field.proto", + "breaking/test_change_pgv_field_changed", + "breaking/test_change_pgv_message.proto", + "breaking/test_change_pgv_message_changed", + "breaking/test_change_pgv_oneof.proto", + "breaking/test_change_pgv_oneof_changed", + ], + visibility = ["//visibility:public"], +) diff --git a/tools/testdata/api_proto_breaking_change_detector/allowed/test_add_comment.proto b/tools/testdata/api_proto_breaking_change_detector/allowed/test_add_comment.proto new file mode 100644 index 0000000000000..b381c393588ef --- /dev/null +++ b/tools/testdata/api_proto_breaking_change_detector/allowed/test_add_comment.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; +package test.protos.allowed; + +option java_package = "io.envoyproxy.envoy.protolock"; +option java_outer_classname = "EnvoyProtolock"; +option java_multiple_files = true; + +message AddCommentMessage { +} diff --git a/tools/testdata/api_proto_breaking_change_detector/allowed/test_add_comment_changed b/tools/testdata/api_proto_breaking_change_detector/allowed/test_add_comment_changed new file mode 100644 index 0000000000000..48b6a550c8178 --- /dev/null +++ b/tools/testdata/api_proto_breaking_change_detector/allowed/test_add_comment_changed @@ -0,0 +1,10 @@ +syntax = 
"proto3"; +package test.protos.allowed; + +option java_package = "io.envoyproxy.envoy.protolock"; +option java_outer_classname = "EnvoyProtolock"; +option java_multiple_files = true; + +// Common configuration for all tap extensions. +message AddCommentMessage { +} diff --git a/tools/testdata/api_proto_breaking_change_detector/allowed/test_add_enum_value.proto b/tools/testdata/api_proto_breaking_change_detector/allowed/test_add_enum_value.proto new file mode 100644 index 0000000000000..56cadb1af24f0 --- /dev/null +++ b/tools/testdata/api_proto_breaking_change_detector/allowed/test_add_enum_value.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; +package test.protos.allowed; + +message SearchRequest { + string query = 1; + int32 page_number = 2; + int32 result_per_page = 3; + enum Corpus { + UNIVERSAL = 0; + WEB = 1; + IMAGES = 2; + LOCAL = 3; + NEWS = 4; + PRODUCTS = 5; + } + Corpus corpus = 4; +} diff --git a/tools/testdata/api_proto_breaking_change_detector/allowed/test_add_enum_value_changed b/tools/testdata/api_proto_breaking_change_detector/allowed/test_add_enum_value_changed new file mode 100644 index 0000000000000..da3446d09fa06 --- /dev/null +++ b/tools/testdata/api_proto_breaking_change_detector/allowed/test_add_enum_value_changed @@ -0,0 +1,18 @@ +syntax = "proto3"; +package test.protos.allowed; + +message SearchRequest { + string query = 1; + int32 page_number = 2; + int32 result_per_page = 3; + enum Corpus { + UNIVERSAL = 0; + WEB = 1; + IMAGES = 2; + LOCAL = 3; + NEWS = 4; + PRODUCTS = 5; + VIDEO = 6; + } + Corpus corpus = 4; +} diff --git a/tools/testdata/api_proto_breaking_change_detector/allowed/test_add_field.proto b/tools/testdata/api_proto_breaking_change_detector/allowed/test_add_field.proto new file mode 100644 index 0000000000000..c16ab3037c87a --- /dev/null +++ b/tools/testdata/api_proto_breaking_change_detector/allowed/test_add_field.proto @@ -0,0 +1,6 @@ +syntax = "proto3"; +package test.protos.allowed; + +message AddFieldMessage { + string three 
= 3; +} diff --git a/tools/testdata/api_proto_breaking_change_detector/allowed/test_add_field_changed b/tools/testdata/api_proto_breaking_change_detector/allowed/test_add_field_changed new file mode 100644 index 0000000000000..49e8d2816997a --- /dev/null +++ b/tools/testdata/api_proto_breaking_change_detector/allowed/test_add_field_changed @@ -0,0 +1,7 @@ +syntax = "proto3"; +package test.protos.allowed; + +message AddFieldMessage { + string three = 3; + float four = 4; +} diff --git a/tools/testdata/api_proto_breaking_change_detector/allowed/test_add_option.proto b/tools/testdata/api_proto_breaking_change_detector/allowed/test_add_option.proto new file mode 100644 index 0000000000000..498895fbbb8bd --- /dev/null +++ b/tools/testdata/api_proto_breaking_change_detector/allowed/test_add_option.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; +package test.protos.allowed; + +option java_package = "io.envoyproxy.envoy.protolock"; +option java_outer_classname = "EnvoyProtolock"; + +// Common configuration for all tap extensions. +message AddOptionMessage { +} diff --git a/tools/testdata/api_proto_breaking_change_detector/allowed/test_add_option_changed b/tools/testdata/api_proto_breaking_change_detector/allowed/test_add_option_changed new file mode 100644 index 0000000000000..1aca28ad4bed2 --- /dev/null +++ b/tools/testdata/api_proto_breaking_change_detector/allowed/test_add_option_changed @@ -0,0 +1,10 @@ +syntax = "proto3"; +package test.protos.allowed; + +option java_package = "io.envoyproxy.envoy.protolock"; +option java_outer_classname = "EnvoyProtolock"; +option java_multiple_files = true; + +// Common configuration for all tap extensions. 
+message AddOptionMessage { +} diff --git a/tools/testdata/api_proto_breaking_change_detector/allowed/test_remove_and_reserve_field.proto b/tools/testdata/api_proto_breaking_change_detector/allowed/test_remove_and_reserve_field.proto new file mode 100644 index 0000000000000..22b7ce3a7b085 --- /dev/null +++ b/tools/testdata/api_proto_breaking_change_detector/allowed/test_remove_and_reserve_field.proto @@ -0,0 +1,7 @@ +syntax = "proto3"; +package test.protos.allowed; + +message RemoveReserveFieldMessage { + string three = 3; + float four = 4; +} diff --git a/tools/testdata/api_proto_breaking_change_detector/allowed/test_remove_and_reserve_field_changed b/tools/testdata/api_proto_breaking_change_detector/allowed/test_remove_and_reserve_field_changed new file mode 100644 index 0000000000000..e226ff0130f70 --- /dev/null +++ b/tools/testdata/api_proto_breaking_change_detector/allowed/test_remove_and_reserve_field_changed @@ -0,0 +1,9 @@ +syntax = "proto3"; +package test.protos.allowed; + +message RemoveReserveFieldMessage { + reserved 4; + reserved "four"; + + string three = 3; +} diff --git a/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_from_oneof.proto b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_from_oneof.proto new file mode 100644 index 0000000000000..945e6ab9c8c5f --- /dev/null +++ b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_from_oneof.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; +package test.protos.breaking; + +message FromOneofMessage { + oneof test_oneof { + string name = 4; + string sub_message = 9; + float special = 10; + } +} diff --git a/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_from_oneof_changed b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_from_oneof_changed new file mode 100644 index 0000000000000..2aa8f4e02a0e3 --- /dev/null +++ 
b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_from_oneof_changed @@ -0,0 +1,10 @@ +syntax = "proto3"; +package test.protos.breaking; + +message FromOneofMessage { + oneof test_oneof { + string name = 4; + string sub_message = 9; + } + float special = 10; +} diff --git a/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_id.proto b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_id.proto new file mode 100644 index 0000000000000..15c0521a89e4e --- /dev/null +++ b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_id.proto @@ -0,0 +1,6 @@ +syntax = "proto3"; +package test.protos.breaking; + +message ChangeFieldIdMessage { + string three = 3; +} diff --git a/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_id_changed b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_id_changed new file mode 100644 index 0000000000000..9c31f17094025 --- /dev/null +++ b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_id_changed @@ -0,0 +1,6 @@ +syntax = "proto3"; +package test.protos.breaking; + +message ChangeFieldIdMessage { + string three = 42; +} diff --git a/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_name.proto b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_name.proto new file mode 100644 index 0000000000000..ed0e9b1af7f68 --- /dev/null +++ b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_name.proto @@ -0,0 +1,6 @@ +syntax = "proto3"; +package test.protos.breaking; + +message ChangeFieldNameMessage { + string seventeen = 17; +} diff --git a/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_name_changed b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_name_changed new file mode 100644 index 0000000000000..7a6e4b1819693 --- /dev/null +++ 
b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_name_changed @@ -0,0 +1,6 @@ +syntax = "proto3"; +package test.protos.breaking; + +message ChangeFieldNameMessage { + string twenty = 17; +} diff --git a/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_plurality.proto b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_plurality.proto new file mode 100644 index 0000000000000..7e15732e19527 --- /dev/null +++ b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_plurality.proto @@ -0,0 +1,6 @@ +syntax = "proto3"; +package test.protos.breaking; + +message ChangeFieldPluralityMessage { + string from = 1; +} diff --git a/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_plurality_changed b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_plurality_changed new file mode 100644 index 0000000000000..5c11b9375ba4b --- /dev/null +++ b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_plurality_changed @@ -0,0 +1,6 @@ +syntax = "proto3"; +package test.protos.breaking; + +message ChangeFieldPluralityMessage { + repeated string from = 1; +} diff --git a/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_to_oneof.proto b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_to_oneof.proto new file mode 100644 index 0000000000000..008ac77757c72 --- /dev/null +++ b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_to_oneof.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; +package test.protos.breaking; + +message ToOneofMessage { + oneof test_oneof { + string name = 4; + string sub_message = 9; + } + float loner = 10; +} diff --git a/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_to_oneof_changed b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_to_oneof_changed new 
file mode 100644 index 0000000000000..564d9dd19660c --- /dev/null +++ b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_to_oneof_changed @@ -0,0 +1,10 @@ +syntax = "proto3"; +package test.protos.breaking; + +message ToOneofMessage { + oneof test_oneof { + string name = 4; + string sub_message = 9; + float loner = 10; + } +} diff --git a/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_type.proto b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_type.proto new file mode 100644 index 0000000000000..3762ae0600e75 --- /dev/null +++ b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_type.proto @@ -0,0 +1,6 @@ +syntax = "proto3"; +package test.protos.breaking; + +message ChangeFieldTypeMessage { + float pi = 314; +} diff --git a/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_type_changed b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_type_changed new file mode 100644 index 0000000000000..7ce276c75fe67 --- /dev/null +++ b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_field_type_changed @@ -0,0 +1,6 @@ +syntax = "proto3"; +package test.protos.breaking; + +message ChangeFieldTypeMessage { + string pi = 314; +} diff --git a/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_package_name.proto b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_package_name.proto new file mode 100644 index 0000000000000..512e1c14d1b6b --- /dev/null +++ b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_package_name.proto @@ -0,0 +1,6 @@ +syntax = "proto3"; +package test.protos.breaking; + +message ChangePackageNameMessage { + string passcode = 69; +} diff --git a/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_package_name_changed 
b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_package_name_changed new file mode 100644 index 0000000000000..a8d583e6d1172 --- /dev/null +++ b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_package_name_changed @@ -0,0 +1,6 @@ +syntax = "proto3"; +package wrong.package; + +message ChangePackageNameMessage { + string passcode = 69; +} diff --git a/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_pgv_field.proto b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_pgv_field.proto new file mode 100644 index 0000000000000..14d1349b5e45f --- /dev/null +++ b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_pgv_field.proto @@ -0,0 +1,8 @@ +syntax = "proto3"; +package test.protos.breaking; + +import "validate/validate.proto"; + +message ChangePGVFieldMessage { + string useremail = 1 [(validate.rules).string.email = true]; +} diff --git a/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_pgv_field_changed b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_pgv_field_changed new file mode 100644 index 0000000000000..7c673f49cd634 --- /dev/null +++ b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_pgv_field_changed @@ -0,0 +1,8 @@ +syntax = "proto3"; +package test.protos.breaking; + +import "validate/validate.proto"; + +message ChangePGVFieldMessage { + string useremail = 1 [(validate.rules).string.email = false]; +} diff --git a/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_pgv_message.proto b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_pgv_message.proto new file mode 100644 index 0000000000000..107189d5aec1b --- /dev/null +++ b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_pgv_message.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; +package test.protos.breaking; + +import "validate/validate.proto"; + +message 
ChangePGVMessageMessage { + option (validate.disabled) = true; + + ChangePGVMessageMessage q = 543; +} diff --git a/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_pgv_message_changed b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_pgv_message_changed new file mode 100644 index 0000000000000..6732ead334d62 --- /dev/null +++ b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_pgv_message_changed @@ -0,0 +1,10 @@ +syntax = "proto3"; +package test.protos.breaking; + +import "validate/validate.proto"; + +message ChangePGVMessageMessage { + option (validate.disabled) = false; + + ChangePGVMessageMessage q = 543; +} diff --git a/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_pgv_oneof.proto b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_pgv_oneof.proto new file mode 100644 index 0000000000000..b93b51a981107 --- /dev/null +++ b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_pgv_oneof.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; +package test.protos.breaking; + +import "validate/validate.proto"; + +message ChangePGVOneofMessage { + oneof test_oneof { + option (validate.required) = true; + string name = 4; + string sub_message = 9; + } +} diff --git a/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_pgv_oneof_changed b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_pgv_oneof_changed new file mode 100644 index 0000000000000..f376cd74caac9 --- /dev/null +++ b/tools/testdata/api_proto_breaking_change_detector/breaking/test_change_pgv_oneof_changed @@ -0,0 +1,12 @@ +syntax = "proto3"; +package test.protos.breaking; + +import "validate/validate.proto"; + +message ChangePGVOneofMessage { + oneof test_oneof { + option (validate.required) = false; + string name = 4; + string sub_message = 9; + } +} diff --git a/tools/testing/BUILD b/tools/testing/BUILD index ab36b91e2212c..a740e89ad5e32 100644 --- 
a/tools/testing/BUILD +++ b/tools/testing/BUILD @@ -24,6 +24,7 @@ envoy_py_binary( requirement("pytest"), requirement("pytest-asyncio"), requirement("pytest-cov"), + requirement("pytest-patches"), "//tools/base:runner", "//tools/base:utils", ], diff --git a/tools/testing/all_pytests.py b/tools/testing/all_pytests.py index df94b8175f871..4225add80659a 100644 --- a/tools/testing/all_pytests.py +++ b/tools/testing/all_pytests.py @@ -10,6 +10,7 @@ import os import sys from functools import cached_property +from typing import Optional from tools.base import checker, runner @@ -51,7 +52,7 @@ def add_arguments(self, parser): default=None, help="Specify a path to collect html coverage with") - def check_pytests(self) -> int: + def check_pytests(self) -> None: for target in self.pytest_targets: try: self.bazel.run(target, *self.pytest_bazel_args) @@ -71,7 +72,7 @@ def on_checks_complete(self): return super().on_checks_complete() -def main(*args: list) -> None: +def main(*args: str) -> Optional[int]: return PytestChecker(*args).run() diff --git a/tools/testing/plugin.py b/tools/testing/plugin.py index 2eb61ec6792d2..d796d210b776b 100644 --- a/tools/testing/plugin.py +++ b/tools/testing/plugin.py @@ -2,55 +2,42 @@ # This is pytest plugin providing fixtures for tests. 
# -from contextlib import contextmanager, ExitStack -from typing import ContextManager, Iterator -from unittest.mock import patch +import functools +from typing import Callable import pytest -@contextmanager -def nested(*contexts) -> Iterator[list]: - with ExitStack() as stack: - yield [stack.enter_context(context) for context in contexts] +def _async_command_main(patches, main: Callable, handler: str, args: tuple) -> None: + parts = handler.split(".") + patched = patches("asyncio.run", parts.pop(), prefix=".".join(parts)) + with patched as (m_run, m_handler): + assert main(*args) == m_run.return_value -def _patches(*args, prefix: str = "") -> ContextManager: - """Takes a list of module/class paths to patch and an optional prefix + assert list(m_run.call_args) == [(m_handler.return_value.run.return_value,), {}] + assert list(m_handler.call_args) == [args, {}] + assert list(m_handler.return_value.run.call_args) == [(), {}] - The prefix is used to prefix all of the paths - The patches are applied in a nested set of context managers. +def _command_main( + patches, + main: Callable, + handler: str, + args=("arg0", "arg1", "arg2"), + async_run: bool = False) -> None: + if async_run: + return _async_command_main(patches, main, handler, args=args) - The yields (mocks) are yielded as a tuple. - """ + patched = patches(handler) - patched = [] - prefix = f"{prefix}." 
if prefix else "" - for arg in args: - if isinstance(arg, (list, tuple)): - path, kwargs = arg - patched.append(patch(f"{prefix}{path}", **kwargs)) - else: - patched.append(patch(f"{prefix}{arg}")) - return nested(*patched) + with patched as (m_handler,): + assert main(*args) == m_handler.return_value.run.return_value - -@pytest.fixture -def patches(): - return _patches - - -def _command_main(main, handler, args=("arg0", "arg1", "arg2")): - class_mock = patch(handler) - - with class_mock as m_class: - assert (main(*args) == m_class.return_value.run.return_value) - - assert (list(m_class.call_args) == [args, {}]) - assert (list(m_class.return_value.run.call_args) == [(), {}]) + assert list(m_handler.call_args) == [args, {}] + assert list(m_handler.return_value.run.call_args) == [(), {}] @pytest.fixture -def command_main(): - return _command_main +def command_main(patches) -> Callable: + return functools.partial(_command_main, patches) diff --git a/tools/testing/requirements.txt b/tools/testing/requirements.txt index f2837acf2e191..0c4dc20def2bb 100644 --- a/tools/testing/requirements.txt +++ b/tools/testing/requirements.txt @@ -73,14 +73,14 @@ iniconfig==1.1.1 \ # -r tools/testing/requirements.txt # pytest packaging==21.0 \ - --hash=sha256:c86254f9220d55e31cc94d69bade760f0847da8000def4dfe1c6b872fd14ff14 \ - --hash=sha256:7dc96269f53a4ccec5c0670940a4281106dd0bb343f47b7471f779df49c2fbe7 + --hash=sha256:7dc96269f53a4ccec5c0670940a4281106dd0bb343f47b7471f779df49c2fbe7 \ + --hash=sha256:c86254f9220d55e31cc94d69bade760f0847da8000def4dfe1c6b872fd14ff14 # via # -r tools/testing/requirements.txt # pytest -pluggy==0.13.1 \ - --hash=sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0 \ - --hash=sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d +pluggy==1.0.0 \ + --hash=sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3 \ + --hash=sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159 # via # -r 
tools/testing/requirements.txt # pytest @@ -104,13 +104,17 @@ pytest-cov==2.12.1 \ --hash=sha256:261bb9e47e65bd099c89c3edf92972865210c36813f80ede5277dceb77a4a62a \ --hash=sha256:261ceeb8c227b726249b376b8526b600f38667ee314f910353fa318caa01f4d7 # via -r tools/testing/requirements.txt -pytest==6.2.4 \ - --hash=sha256:50bcad0a0b9c5a72c8e4e7c9855a3ad496ca6a881a3641b4260605450772c54b \ - --hash=sha256:91ef2131a9bd6be8f76f1f08eac5c5317221d6ad1e143ae03894b862e8976890 +pytest-patches==0.0.3 \ + --hash=sha256:6f8cdc8641c708c4812f58ae48d410f373a6fd16cd6cc4dc4d3fb8951df9c92a + # via -r tools/testing/requirements.txt +pytest==6.2.5 \ + --hash=sha256:7310f8d27bc79ced999e760ca304d69f6ba6c6649c0b60fb0e04a4a77cacc134 \ + --hash=sha256:131b36680866a76e6781d13f101efb86cf674ebb9762eb70d3082b6f29889e89 # via # -r tools/testing/requirements.txt # pytest-asyncio # pytest-cov + # pytest-patches toml==0.10.2 \ --hash=sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b \ --hash=sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f diff --git a/tools/testing/tests/test_python_pytest.py b/tools/testing/tests/test_python_pytest.py index a25464c9c3564..c1056546f29b8 100644 --- a/tools/testing/tests/test_python_pytest.py +++ b/tools/testing/tests/test_python_pytest.py @@ -81,3 +81,125 @@ def test_pytest_main(command_main): command_main( python_pytest.main, "tools.testing.python_pytest.PytestRunner") + + +def test_plugin_command_main(patches): + patched = patches( + "functools", + "_command_main", + prefix="tools.testing.plugin") + + with patched as (m_funct, m_command): + assert plugin.command_main._pytestfixturefunction.scope == "function" + assert plugin.command_main._pytestfixturefunction.autouse is False + assert ( + plugin.command_main.__pytest_wrapped__.obj(patches) + == m_funct.partial.return_value) + + assert ( + list(m_funct.partial.call_args) + == [(m_command, patches), {}]) + + +@pytest.mark.parametrize("args", [None, (), tuple(f"ARG{i}" for i in 
range(0, 3))]) +@pytest.mark.parametrize("async_run", [None, True, False]) +@pytest.mark.parametrize("raises", [None, "main", "handler", "run"]) +def test_plugin__command_main(patches, args, async_run, raises): + patched = patches( + "_async_command_main", + prefix="tools.testing.plugin") + _args = ("arg0", "arg1", "arg2") if args is None else args + _m_handler = MagicMock() + _patches = MagicMock() + _patches.return_value.__enter__.return_value = (_m_handler, ) + main = MagicMock() + handler = MagicMock() + kwargs = {} + if args is not None: + kwargs["args"] = args + if async_run is not None: + kwargs["async_run"] = async_run + if raises != "main": + main.return_value = _m_handler.return_value.run.return_value + if raises != "handler": + _m_handler(*_args) + else: + _m_handler("SOMETHING", "ELSE") + if raises != "run": + _m_handler.return_value.run() + else: + _m_handler.return_value.run("NOT", "RUN") + + with patched as (m_command, ): + if not raises or async_run: + result = plugin._command_main(_patches, main, handler, **kwargs) + else: + with pytest.raises(AssertionError) as e: + plugin._command_main(_patches, main, handler, **kwargs) + + if async_run: + assert result == m_command.return_value + assert ( + list(m_command.call_args) + == [(_patches, + main, + handler), + {'args': _args}]) + assert not _patches.called + assert not main.called + return + + assert not m_command.called + assert ( + list(_patches.call_args) + == [(handler,), {}]) + assert ( + list(main.call_args) + == [_args, {}]) + + if not raises: + assert not result + + +@pytest.mark.parametrize("raises", [None, "main", "aiorun", "handler", "run"]) +def test_plugin__async_command_main(raises): + _m_run = MagicMock() + _m_handler = MagicMock() + _patches = MagicMock() + _patches.return_value.__enter__.return_value = (_m_run, _m_handler) + main = MagicMock() + handler = MagicMock() + handler.split.return_value = [f"PART{i}" for i in range(0, 3)] + args = ("arg0", "arg1", "arg2") + + if raises != 
"main": + main.return_value = _m_run.return_value + + if raises != "aiorun": + _m_run(_m_handler.return_value.run.return_value) + else: + _m_run("NOT", "AIORUN") + if raises != "handler": + _m_handler(*args) + else: + _m_handler("SOMETHING", "ELSE") + if raises != "run": + _m_handler.return_value.run() + else: + _m_handler.return_value.run("NOT", "RUN") + + if not raises: + assert not plugin._async_command_main(_patches, main, handler, args) + else: + with pytest.raises(AssertionError): + plugin._async_command_main(_patches, main, handler, args) + + assert ( + list(_patches.call_args) + == [('asyncio.run', 'PART2'), {'prefix': 'PART0.PART1'}]) + assert ( + list(handler.split.call_args) + == [('.',), {}]) + assert ( + list(main.call_args) + == [args, {}]) diff --git a/tools/type_whisperer/proto_build_targets_gen.py b/tools/type_whisperer/proto_build_targets_gen.py index 566febc443f33..2da22439946f1 100644 --- a/tools/type_whisperer/proto_build_targets_gen.py +++ b/tools/type_whisperer/proto_build_targets_gen.py @@ -66,6 +66,14 @@ ":v3_protos", ], ) + +filegroup( + name = "proto_breaking_change_detector_buf_config", + srcs = [ + "buf.yaml", + ], + visibility = ["//visibility:public"], +) """) @@ -120,6 +128,11 @@ def is_v3_package(pkg): for desc in type_db.types.values(): pkg = desc.qualified_package if is_v3_package(pkg): + # contrib API files have the standard namespace but are in a contrib folder for clarity. + # The following prepends contrib to the package path which indirectly will produce the + # proper bazel path. + if desc.proto_path.startswith('contrib/'): + pkg = "contrib." 
+ pkg v3_packages.add(pkg) continue if is_v2_package(pkg): diff --git a/tools/type_whisperer/typedb_gen.py b/tools/type_whisperer/typedb_gen.py index 44905c6b3e6a9..b89e3efdcbde1 100644 --- a/tools/type_whisperer/typedb_gen.py +++ b/tools/type_whisperer/typedb_gen.py @@ -181,8 +181,9 @@ def next_version_upgrade(type_name, type_map, next_version_upgrade_memo, visited type_map[type_desc.next_version_type_name].proto_path, type_map[type_desc.next_version_type_name].qualified_package) for proto_path, (next_proto_path, next_package) in sorted(next_proto_info.items()): - type_db.next_version_protos[proto_path].proto_path = next_proto_path - type_db.next_version_protos[proto_path].qualified_package = next_package + if not next_package.endswith('.v4alpha'): + type_db.next_version_protos[proto_path].proto_path = next_proto_path + type_db.next_version_protos[proto_path].qualified_package = next_package # Write out proto text. with open(out_path, 'w') as f: